-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/x86/kernel/early-quirks.c | 12
-rw-r--r--  drivers/gpu/drm/Kconfig | 73
-rw-r--r--  drivers/gpu/drm/Makefile | 5
-rw-r--r--  drivers/gpu/drm/armada/Kconfig | 24
-rw-r--r--  drivers/gpu/drm/armada/Makefile | 7
-rw-r--r--  drivers/gpu/drm/armada/armada_510.c | 87
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 1098
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.h | 83
-rw-r--r--  drivers/gpu/drm/armada/armada_debugfs.c | 177
-rw-r--r--  drivers/gpu/drm/armada/armada_drm.h | 113
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 421
-rw-r--r--  drivers/gpu/drm/armada/armada_fb.c | 170
-rw-r--r--  drivers/gpu/drm/armada/armada_fb.h | 24
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c | 202
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 611
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.h | 52
-rw-r--r--  drivers/gpu/drm/armada/armada_hw.h | 318
-rw-r--r--  drivers/gpu/drm/armada/armada_ioctlP.h | 18
-rw-r--r--  drivers/gpu/drm/armada/armada_output.c | 158
-rw-r--r--  drivers/gpu/drm/armada/armada_output.h | 39
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 477
-rw-r--r--  drivers/gpu/drm/armada/armada_slave.c | 139
-rw-r--r--  drivers/gpu/drm/armada/armada_slave.h | 26
-rw-r--r--  drivers/gpu/drm/ast/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 6
-rw-r--r--  drivers/gpu/drm/cirrus/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 1
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 1
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 6
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 11
-rw-r--r--  drivers/gpu/drm/drm_context.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 153
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 96
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 6
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 16
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 74
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 314
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 108
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 17
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 77
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 29
-rw-r--r--  drivers/gpu/drm/drm_global.c | 2
-rw-r--r--  drivers/gpu/drm/drm_info.c | 6
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 21
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 177
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 3
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 43
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 69
-rw-r--r--  drivers/gpu/drm/drm_platform.c | 59
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 3
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 362
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 96
-rw-r--r--  drivers/gpu/drm/drm_usb.c | 57
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 16
-rw-r--r--  drivers/gpu/drm/gma500/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/intel_gmbus.c | 90
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 433
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_device.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 30
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 39
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 58
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 59
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c | 22
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 11
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 67
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 6
-rw-r--r--  drivers/gpu/drm/i915/dvo.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 1205
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 118
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 187
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 437
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 558
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 64
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 50
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 401
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 508
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 46
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 1043
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 827
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 44
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 152
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 62
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 195
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 121
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 251
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1701
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 745
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 565
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 620
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 102
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.c | 427
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.h | 109
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c | 317
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c (renamed from drivers/gpu/drm/i915/intel_fb.c) | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 83
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 64
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 494
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 346
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 1334
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 275
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_sideband.c | 79
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 203
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 433
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c | 5
-rw-r--r--  drivers/gpu/drm/mga/mga_irq.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 6
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 2
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h | 42
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h | 46
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 10
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 10
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 6
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 6
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h | 6
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 6
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 6
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4.xml.h | 126
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_crtc.c | 208
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_format.c | 16
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_kms.c | 19
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_kms.h | 58
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_plane.c | 30
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 60
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 37
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 160
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 56
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 48
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/event.c | 119
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/option.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/printk.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/base.c | 56
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/ctrl.c | 144
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv04.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv10.c | 76
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv20.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv30.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv40.c | 218
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv50.c | 195
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | 118
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nve0.c | 99
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/priv.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dport.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv04.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv10.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c | 68
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h | 15
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c | 103
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c | 194
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/perfmon/base.c | 449
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c | 109
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c | 143
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h | 26
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c | 70
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c | 78
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c | 96
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c | 173
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c | 162
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c | 71
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h | 91
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv04.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv10.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv50.c | 96
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv50.h | 47
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nvc0.c | 130
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/class.h | 73
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/debug.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/device.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/event.h | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/option.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/printk.h | 30
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/fifo.h | 16
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/mpeg.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/perfmon.h | 39
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/software.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h | 28
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h | 33
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h | 25
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h | 27
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bus.h | 20
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/clock.h | 111
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/fb.h | 50
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/i2c.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/mc.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/pwr.h | 80
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/therm.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/volt.h | 60
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/boost.c | 127
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c | 123
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/dp.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/perf.c | 140
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/pll.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c | 88
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/timing.c | 73
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c | 112
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/volt.c | 137
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c | 145
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h | 113
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h | 23
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c | 60
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/base.c | 494
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c | 183
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c | 520
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h | 31
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c | 48
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c | 271
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c | 404
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c | 497
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c | 37
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/seq.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/base.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c | 96
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h | 55
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c | 47
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c | 47
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c | 47
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c | 47
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c | 48
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c | 48
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c | 48
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h | 33
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/priv.h | 53
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h | 118
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c | 168
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c | 344
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c | 447
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c | 567
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c | 1264
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h | 18
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c | 99
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/base.c | 89
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | 41
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | 37
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/base.c | 247
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc | 151
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc | 84
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc | 452
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc | 199
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc | 219
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc | 63
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h | 1165
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc | 63
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h | 1229
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc | 63
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h | 1229
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc | 63
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h | 1229
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h | 27
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc | 57
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc | 64
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c | 121
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c | 71
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/base.c | 55
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/fan.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/ic.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/temp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/volt/base.c | 198
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c | 96
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c | 56
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/Makefile | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/arb.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dfp.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/hw.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/overlay.c | 320
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_agp.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 185
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 121
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwmon.c (renamed from drivers/gpu/drm/nouveau/nouveau_pm.c) | 560
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwmon.h | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwsq.h | 115
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 647
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c | 416
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.h | 283
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sysfs.c | 162
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sysfs.h | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_volt.c | 250
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_pm.c | 146
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_pm.c | 353
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_pm.c | 855
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_pm.c | 624
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_pm.c | 599
-rw-r--r--  drivers/gpu/drm/omapdrm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_irq.c | 17
-rw-r--r--  drivers/gpu/drm/qxl/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 51
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c | 6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_kms.c | 42
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 127
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 58
-rw-r--r--  drivers/gpu/drm/radeon/ci_smc.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 757
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 62
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 103
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 66
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 80
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 71
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 70
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 76
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 53
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 102
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 74
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 35
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 116
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 298
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 78
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 83
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 173
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_family.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 350
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 73
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ioc32.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ucode.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 64
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 99
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 47
-rw-r--r--  drivers/gpu/drm/rcar-du/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/shmobile/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/Kconfig (renamed from drivers/gpu/host1x/drm/Kconfig) | 13
-rw-r--r--  drivers/gpu/drm/tegra/Makefile | 15
-rw-r--r--  drivers/gpu/drm/tegra/bus.c | 76
-rw-r--r--  drivers/gpu/drm/tegra/dc.c (renamed from drivers/gpu/host1x/drm/dc.c) | 108
-rw-r--r--  drivers/gpu/drm/tegra/dc.h (renamed from drivers/gpu/host1x/drm/dc.h) | 5
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 714
-rw-r--r--  drivers/gpu/drm/tegra/drm.h (renamed from drivers/gpu/host1x/drm/drm.h) | 101
-rw-r--r--  drivers/gpu/drm/tegra/fb.c (renamed from drivers/gpu/host1x/drm/fb.c) | 38
-rw-r--r--  drivers/gpu/drm/tegra/gem.c (renamed from drivers/gpu/host1x/drm/gem.c) | 44
-rw-r--r--  drivers/gpu/drm/tegra/gem.h (renamed from drivers/gpu/host1x/drm/gem.h) | 16
-rw-r--r--  drivers/gpu/drm/tegra/gr2d.c | 227
-rw-r--r--  drivers/gpu/drm/tegra/gr2d.h | 28
-rw-r--r--  drivers/gpu/drm/tegra/gr3d.c | 338
-rw-r--r--  drivers/gpu/drm/tegra/gr3d.h | 27
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c (renamed from drivers/gpu/host1x/drm/hdmi.c) | 257
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.h (renamed from drivers/gpu/host1x/drm/hdmi.h) | 152
-rw-r--r--  drivers/gpu/drm/tegra/output.c (renamed from drivers/gpu/host1x/drm/output.c) | 64
-rw-r--r--  drivers/gpu/drm/tegra/rgb.c (renamed from drivers/gpu/host1x/drm/rgb.c) | 19
-rw-r--r--  drivers/gpu/drm/tilcdc/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 46
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 30
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 92
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 3
-rw-r--r--  drivers/gpu/drm/udl/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 1
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 1
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 7
-rw-r--r--  drivers/gpu/drm/via/via_mm.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 379
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 94
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 98
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 153
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 42
-rw-r--r--  drivers/gpu/host1x/Kconfig | 2
-rw-r--r--  drivers/gpu/host1x/Makefile | 13
-rw-r--r--  drivers/gpu/host1x/bus.c | 550
-rw-r--r--  drivers/gpu/host1x/bus.h (renamed from drivers/gpu/host1x/host1x_client.h) | 24
-rw-r--r--  drivers/gpu/host1x/cdma.c | 2
-rw-r--r--  drivers/gpu/host1x/channel.h | 6
-rw-r--r--  drivers/gpu/host1x/dev.c | 82
-rw-r--r--  drivers/gpu/host1x/dev.h | 11
-rw-r--r--  drivers/gpu/host1x/drm/drm.c | 647
-rw-r--r--  drivers/gpu/host1x/drm/gr2d.c | 343
-rw-r--r--  drivers/gpu/host1x/host1x.h | 30
-rw-r--r--  drivers/gpu/host1x/host1x_bo.h | 87
-rw-r--r--  drivers/gpu/host1x/hw/Makefile | 6
-rw-r--r--  drivers/gpu/host1x/hw/cdma_hw.c | 8
-rw-r--r--  drivers/gpu/host1x/hw/channel_hw.c | 32
-rw-r--r--  drivers/gpu/host1x/hw/debug_hw.c | 16
-rw-r--r--  drivers/gpu/host1x/hw/host1x01.c | 16
-rw-r--r--  drivers/gpu/host1x/hw/host1x02.c | 42
-rw-r--r--  drivers/gpu/host1x/hw/host1x02.h | 26
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x01_uclass.h | 6
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x02_channel.h | 121
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x02_sync.h | 243
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x02_uclass.h | 175
-rw-r--r--  drivers/gpu/host1x/hw/intr_hw.c | 4
-rw-r--r--  drivers/gpu/host1x/hw/syncpt_hw.c | 4
-rw-r--r--  drivers/gpu/host1x/job.c | 73
-rw-r--r--  drivers/gpu/host1x/job.h | 108
-rw-r--r--  drivers/gpu/host1x/syncpt.c | 92
-rw-r--r--  drivers/gpu/host1x/syncpt.h | 46
-rw-r--r--  drivers/staging/imx-drm/Kconfig | 1
-rw-r--r--  drivers/staging/imx-drm/imx-drm-core.c | 8
-rw-r--r--  drivers/video/Kconfig | 4
-rw-r--r--  include/drm/drmP.h | 111
-rw-r--r--  include/drm/drm_crtc.h | 39
-rw-r--r--  include/drm/drm_crtc_helper.h | 2
-rw-r--r--  include/drm/drm_dp_helper.h | 31
-rw-r--r--  include/drm/drm_pciids.h | 12
-rw-r--r--  include/drm/i915_drm.h | 4
-rw-r--r--  include/drm/i915_pciids.h | 25
-rw-r--r--  include/drm/ttm/ttm_page_alloc.h | 11
-rw-r--r--  include/linux/cpufreq.h | 8
-rw-r--r--  include/linux/host1x.h | 284
-rw-r--r--  include/uapi/drm/armada_drm.h | 45
-rw-r--r--  include/uapi/drm/drm.h | 37
-rw-r--r--  include/uapi/drm/drm_mode.h | 45
-rw-r--r--  include/uapi/drm/i915_drm.h | 8
-rw-r--r--  include/uapi/drm/tegra_drm.h | 29
557 files changed, 44542 insertions(+), 14851 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 583af4b72ad0..d2270c070c0f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2849,7 +2849,9 @@ L: dri-devel@lists.freedesktop.org
 L:	linux-tegra@vger.kernel.org
 T:	git git://anongit.freedesktop.org/tegra/linux.git
 S:	Supported
+F:	drivers/gpu/drm/tegra/
 F:	drivers/gpu/host1x/
+F:	include/linux/host1x.h
 F:	include/uapi/drm/tegra_drm.h
 F:	Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
 
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index b3cd3ebae077..96f958d8cd45 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -313,6 +313,16 @@ static size_t __init gen6_stolen_size(int num, int slot, int func)
 	return gmch_ctrl << 25; /* 32 MB units */
 }
 
+static inline size_t gen8_stolen_size(int num, int slot, int func)
+{
+	u16 gmch_ctrl;
+
+	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
+	gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
+	gmch_ctrl &= BDW_GMCH_GMS_MASK;
+	return gmch_ctrl << 25; /* 32 MB units */
+}
+
 typedef size_t (*stolen_size_fn)(int num, int slot, int func);
 
 static struct pci_device_id intel_stolen_ids[] __initdata = {
@@ -336,6 +346,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = {
 	INTEL_IVB_D_IDS(gen6_stolen_size),
 	INTEL_HSW_D_IDS(gen6_stolen_size),
 	INTEL_HSW_M_IDS(gen6_stolen_size),
+	INTEL_BDW_M_IDS(gen8_stolen_size),
+	INTEL_BDW_D_IDS(gen8_stolen_size)
 };
 
 static void __init intel_graphics_stolen(int num, int slot, int func)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 955555d6ec88..f86427591167 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -29,11 +29,17 @@ config DRM_USB
 config DRM_KMS_HELPER
 	tristate
 	depends on DRM
+	help
+	  CRTC helpers for KMS drivers.
+
+config DRM_KMS_FB_HELPER
+	bool
+	depends on DRM_KMS_HELPER
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EXPERT
 	select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
 	help
-	  FB and CRTC helpers for KMS drivers.
+	  FBDEV helpers for KMS drivers.
 
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
@@ -64,6 +70,7 @@ config DRM_GEM_CMA_HELPER
 config DRM_KMS_CMA_HELPER
 	bool
 	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
@@ -96,6 +103,7 @@ config DRM_RADEON
 	select FB_CFB_IMAGEBLIT
 	select FW_LOADER
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	select POWER_SUPPLY
 	select HWMON
@@ -120,64 +128,7 @@ config DRM_I810
 	  selected, the module will be called i810. AGP support is required
 	  for this driver to work.
 
-config DRM_I915
-	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
-	depends on DRM
-	depends on AGP
-	depends on AGP_INTEL
-	# we need shmfs for the swappable backing store, and in particular
-	# the shmem_readpage() which depends upon tmpfs
-	select SHMEM
-	select TMPFS
-	select DRM_KMS_HELPER
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
-	# i915 depends on ACPI_VIDEO when ACPI is enabled
-	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select BACKLIGHT_LCD_SUPPORT if ACPI
-	select BACKLIGHT_CLASS_DEVICE if ACPI
-	select VIDEO_OUTPUT_CONTROL if ACPI
-	select INPUT if ACPI
-	select THERMAL if ACPI
-	select ACPI_VIDEO if ACPI
-	select ACPI_BUTTON if ACPI
-	help
-	  Choose this option if you have a system that has "Intel Graphics
-	  Media Accelerator" or "HD Graphics" integrated graphics,
-	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
-	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
-	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
-	  If M is selected, the module will be called i915. AGP support
-	  is required for this driver to work. This driver is used by
-	  the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
-	  replaces the older i830 module that supported a subset of the
-	  hardware in older X.org releases.
-
-	  Note that the older i810/i815 chipsets require the use of the
-	  i810 driver instead, and the Atom z5xx series has an entirely
-	  different implementation.
-
-config DRM_I915_KMS
-	bool "Enable modesetting on intel by default"
-	depends on DRM_I915
-	help
-	  Choose this option if you want kernel modesetting enabled by default,
-	  and you have a new enough userspace to support this. Running old
-	  userspaces with this enabled will cause pain. Note that this causes
-	  the driver to bind to PCI devices, which precludes loading things
-	  like intelfb.
-
-config DRM_I915_PRELIMINARY_HW_SUPPORT
-	bool "Enable preliminary support for prerelease Intel hardware by default"
-	depends on DRM_I915
-	help
-	  Choose this option if you have prerelease Intel hardware and want the
-	  i915 driver to support it by default. You can enable such support at
-	  runtime with the module option i915.preliminary_hw_support=1; this
-	  option changes the default for that module option.
-
-	  If in doubt, say "N".
+source "drivers/gpu/drm/i915/Kconfig"
 
 config DRM_MGA
 	tristate "Matrox g200/g400"
@@ -225,6 +176,8 @@ source "drivers/gpu/drm/mgag200/Kconfig"
 
 source "drivers/gpu/drm/cirrus/Kconfig"
 
+source "drivers/gpu/drm/armada/Kconfig"
+
 source "drivers/gpu/drm/rcar-du/Kconfig"
 
 source "drivers/gpu/drm/shmobile/Kconfig"
@@ -236,3 +189,5 @@ source "drivers/gpu/drm/tilcdc/Kconfig"
 source "drivers/gpu/drm/qxl/Kconfig"
 
 source "drivers/gpu/drm/msm/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index f089adfe70ee..cc08b845f965 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -21,8 +21,9 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
 
 drm-usb-y := drm_usb.o
 
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -49,10 +50,12 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_ARMADA) += armada/
 obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-$(CONFIG_DRM_OMAP) += omapdrm/
 obj-$(CONFIG_DRM_TILCDC) += tilcdc/
 obj-$(CONFIG_DRM_QXL) += qxl/
 obj-$(CONFIG_DRM_MSM) += msm/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-y += i2c/
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
new file mode 100644
index 000000000000..40d371521fe1
--- /dev/null
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -0,0 +1,24 @@
+config DRM_ARMADA
+	tristate "DRM support for Marvell Armada SoCs"
+	depends on DRM && HAVE_CLK && ARM
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select DRM_KMS_HELPER
+	help
+	  Support the "LCD" controllers found on the Marvell Armada 510
+	  devices. There are two controllers on the device, each controller
+	  supports graphics and video overlays.
+
+	  This driver provides no built-in acceleration; acceleration is
+	  performed by other IP found on the SoC. This driver provides
+	  kernel mode setting and buffer management to userspace.
+
+config DRM_ARMADA_TDA1998X
+	bool "Support TDA1998X HDMI output"
+	depends on DRM_ARMADA != n
+	depends on I2C && DRM_I2C_NXP_TDA998X = y
+	default y
+	help
+	  Support the TDA1998x HDMI output device found on the Solid-Run
+	  CuBox.
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
new file mode 100644
index 000000000000..d6f43e06150a
--- /dev/null
+++ b/drivers/gpu/drm/armada/Makefile
@@ -0,0 +1,7 @@
+armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
+	armada_gem.o armada_output.o armada_overlay.o \
+	armada_slave.o
+armada-y += armada_510.o
+armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
+
+obj-$(CONFIG_DRM_ARMADA) := armada.o
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
new file mode 100644
index 000000000000..59948eff6095
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Armada 510 (aka Dove) variant support
+ */
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_hw.h"
+
+static int armada510_init(struct armada_private *priv, struct device *dev)
+{
+	priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
+
+	if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
+		priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
+
+	return PTR_RET(priv->extclk[0]);
+}
+
+static int armada510_crtc_init(struct armada_crtc *dcrtc)
+{
+	/* Lower the watermark so to eliminate jitter at higher bandwidths */
+	armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+	return 0;
+}
+
+/*
+ * Armada510 specific SCLK register selection.
+ * This gets called with sclk = NULL to test whether the mode is
+ * supportable, and again with sclk != NULL to set the clocks up for
+ * that. The former can return an error, but the latter is expected
+ * not to.
+ *
+ * We currently are pretty rudimentary here, always selecting
+ * EXT_REF_CLK_1 for LCD0 and erroring LCD1. This needs improvement!
+ */
+static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
+	const struct drm_display_mode *mode, uint32_t *sclk)
+{
+	struct armada_private *priv = dcrtc->crtc.dev->dev_private;
+	struct clk *clk = priv->extclk[0];
+	int ret;
+
+	if (dcrtc->num == 1)
+		return -EINVAL;
+
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	if (dcrtc->clk != clk) {
+		ret = clk_prepare_enable(clk);
+		if (ret)
+			return ret;
+		dcrtc->clk = clk;
+	}
+
+	if (sclk) {
+		uint32_t rate, ref, div;
+
+		rate = mode->clock * 1000;
+		ref = clk_round_rate(clk, rate);
+		div = DIV_ROUND_UP(ref, rate);
+		if (div < 1)
+			div = 1;
+
+		clk_set_rate(clk, ref);
+		*sclk = div | SCLK_510_EXTCLK1;
+	}
+
+	return 0;
+}
+
+const struct armada_variant armada510_ops = {
+	.has_spu_adv_reg = true,
+	.spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
+	.init = armada510_init,
+	.crtc_init = armada510_crtc_init,
+	.crtc_compute_clock = armada510_crtc_compute_clock,
+};
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
new file mode 100644
index 000000000000..d8e398275ca8
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -0,0 +1,1098 @@
1/*
2 * Copyright (C) 2012 Russell King
3 * Rewritten from the dovefb driver, and Armada510 manuals.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/clk.h>
10#include <drm/drmP.h>
11#include <drm/drm_crtc_helper.h>
12#include "armada_crtc.h"
13#include "armada_drm.h"
14#include "armada_fb.h"
15#include "armada_gem.h"
16#include "armada_hw.h"
17
18struct armada_frame_work {
19 struct drm_pending_vblank_event *event;
20 struct armada_regs regs[4];
21 struct drm_framebuffer *old_fb;
22};
23
24enum csc_mode {
25 CSC_AUTO = 0,
26 CSC_YUV_CCIR601 = 1,
27 CSC_YUV_CCIR709 = 2,
28 CSC_RGB_COMPUTER = 1,
29 CSC_RGB_STUDIO = 2,
30};
31
32/*
33 * A note about interlacing. Let's consider HDMI 1920x1080i.
34 * The timing parameters we have from X are:
35 * Hact HsyA HsyI Htot Vact VsyA VsyI Vtot
36 * 1920 2448 2492 2640 1080 1084 1094 1125
37 * Which get translated to:
38 * Hact HsyA HsyI Htot Vact VsyA VsyI Vtot
39 * 1920 2448 2492 2640 540 542 547 562
40 *
41 * This is how it is defined by CEA-861-D - line and pixel numbers are
42 * referenced to the rising edge of VSYNC and HSYNC. Total clocks per
43 * line: 2640. The odd frame, the first active line is at line 21, and
44 * the even frame, the first active line is 584.
45 *
46 * LN: 560 561 562 563 567 568 569
47 * DE: ~~~|____________________________//__________________________
48 * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
49 * VSYNC: _________________________|~~~~~~//~~~~~~~~~~~~~~~|__________
50 * 22 blanking lines. VSYNC at 1320 (referenced to the HSYNC rising edge).
51 *
52 * LN: 1123 1124 1125 1 5 6 7
53 * DE: ~~~|____________________________//__________________________
54 * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
55 * VSYNC: ____________________|~~~~~~~~~~~//~~~~~~~~~~|_______________
56 * 23 blanking lines
57 *
58 * The Armada LCD Controller line and pixel numbers are, like X timings,
59 * referenced to the top left of the active frame.
60 *
61 * So, translating these to our LCD controller:
62 * Odd frame, 563 total lines, VSYNC at line 543-548, pixel 1128.
63 * Even frame, 562 total lines, VSYNC at line 542-547, pixel 2448.
64 * Note: Vsync front porch remains constant!
65 *
66 * if (odd_frame) {
67 * vtotal = mode->crtc_vtotal + 1;
68 * vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay + 1;
69 * vhorizpos = mode->crtc_hsync_start - mode->crtc_htotal / 2
70 * } else {
71 * vtotal = mode->crtc_vtotal;
72 * vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay;
73 * vhorizpos = mode->crtc_hsync_start;
74 * }
75 * vfrontporch = mode->crtc_vtotal - mode->crtc_vsync_end;
76 *
77 * So, we need to reprogram these registers on each vsync event:
78 * LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
79 *
80 * Note: we do not use the frame done interrupts because these appear
81 * to happen too early, and lead to jitter on the display (presumably
82 * they occur at the end of the last active line, before the vsync back
83 * porch, which we're reprogramming.)
84 */
85
86void
87armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
88{
89 while (regs->offset != ~0) {
90 void __iomem *reg = dcrtc->base + regs->offset;
91 uint32_t val;
92
93 val = regs->mask;
94 if (val != 0)
95 val &= readl_relaxed(reg);
96 writel_relaxed(val | regs->val, reg);
97 ++regs;
98 }
99}
100
101#define dpms_blanked(dpms) ((dpms) != DRM_MODE_DPMS_ON)
102
103static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
104{
105 uint32_t dumb_ctrl;
106
107 dumb_ctrl = dcrtc->cfg_dumb_ctrl;
108
109 if (!dpms_blanked(dcrtc->dpms))
110 dumb_ctrl |= CFG_DUMB_ENA;
111
112 /*
113 * When the dumb interface isn't in DUMB24_RGB888_0 mode, it might
114 * be using SPI or GPIO. If we set this to DUMB_BLANK, we will
115 * force LCD_D[23:0] to output blank color, overriding the GPIO or
116 * SPI usage. So leave it as-is unless in DUMB24_RGB888_0 mode.
117 */
118 if (dpms_blanked(dcrtc->dpms) &&
119 (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
120 dumb_ctrl &= ~DUMB_MASK;
121 dumb_ctrl |= DUMB_BLANK;
122 }
123
124 /*
125 * The documentation doesn't indicate what the normal state of
126 * the sync signals are. Sebastian Hesselbart kindly probed
127 * these signals on his board to determine their state.
128 *
129 * The non-inverted state of the sync signals is active high.
130 * Setting these bits makes the appropriate signal active low.
131 */
132 if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC)
133 dumb_ctrl |= CFG_INV_CSYNC;
134 if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC)
135 dumb_ctrl |= CFG_INV_HSYNC;
136 if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC)
137 dumb_ctrl |= CFG_INV_VSYNC;
138
139 if (dcrtc->dumb_ctrl != dumb_ctrl) {
140 dcrtc->dumb_ctrl = dumb_ctrl;
141 writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL);
142 }
143}
144
145static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
146 int x, int y, struct armada_regs *regs, bool interlaced)
147{
148 struct armada_gem_object *obj = drm_fb_obj(fb);
149 unsigned pitch = fb->pitches[0];
150 unsigned offset = y * pitch + x * fb->bits_per_pixel / 8;
151 uint32_t addr_odd, addr_even;
152 unsigned i = 0;
153
154 DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
155 pitch, x, y, fb->bits_per_pixel);
156
157 addr_odd = addr_even = obj->dev_addr + offset;
158
159 if (interlaced) {
160 addr_even += pitch;
161 pitch *= 2;
162 }
163
164 /* write offset, base, and pitch */
165 armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0);
166 armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1);
167 armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
168
169 return i;
170}
171
172static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
173 struct armada_frame_work *work)
174{
175 struct drm_device *dev = dcrtc->crtc.dev;
176 unsigned long flags;
177 int ret;
178
179 ret = drm_vblank_get(dev, dcrtc->num);
180 if (ret) {
181 DRM_ERROR("failed to acquire vblank counter\n");
182 return ret;
183 }
184
185 spin_lock_irqsave(&dev->event_lock, flags);
186 if (!dcrtc->frame_work)
187 dcrtc->frame_work = work;
188 else
189 ret = -EBUSY;
190 spin_unlock_irqrestore(&dev->event_lock, flags);
191
192 if (ret)
193 drm_vblank_put(dev, dcrtc->num);
194
195 return ret;
196}
197
198static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc)
199{
200 struct drm_device *dev = dcrtc->crtc.dev;
201 struct armada_frame_work *work = dcrtc->frame_work;
202
203 dcrtc->frame_work = NULL;
204
205 armada_drm_crtc_update_regs(dcrtc, work->regs);
206
207 if (work->event)
208 drm_send_vblank_event(dev, dcrtc->num, work->event);
209
210 drm_vblank_put(dev, dcrtc->num);
211
212 /* Finally, queue the process-half of the cleanup. */
213 __armada_drm_queue_unref_work(dcrtc->crtc.dev, work->old_fb);
214 kfree(work);
215}
216
217static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
218 struct drm_framebuffer *fb, bool force)
219{
220 struct armada_frame_work *work;
221
222 if (!fb)
223 return;
224
225 if (force) {
226 /* Display is disabled, so just drop the old fb */
227 drm_framebuffer_unreference(fb);
228 return;
229 }
230
231 work = kmalloc(sizeof(*work), GFP_KERNEL);
232 if (work) {
233 int i = 0;
234 work->event = NULL;
235 work->old_fb = fb;
236 armada_reg_queue_end(work->regs, i);
237
238 if (armada_drm_crtc_queue_frame_work(dcrtc, work) == 0)
239 return;
240
241 kfree(work);
242 }
243
244 /*
245 * Oops - just drop the reference immediately and hope for
246 * the best. The worst that will happen is the buffer gets
247 * reused before it has finished being displayed.
248 */
249 drm_framebuffer_unreference(fb);
250}
251
252static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
253{
254 struct drm_device *dev = dcrtc->crtc.dev;
255
256 /*
257 * Tell the DRM core that vblank IRQs aren't going to happen for
258 * a while. This cleans up any pending vblank events for us.
259 */
260 drm_vblank_off(dev, dcrtc->num);
261
262 /* Handle any pending flip event. */
263 spin_lock_irq(&dev->event_lock);
264 if (dcrtc->frame_work)
265 armada_drm_crtc_complete_frame_work(dcrtc);
266 spin_unlock_irq(&dev->event_lock);
267}
268
269void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
270 int idx)
271{
272}
273
274void armada_drm_crtc_gamma_get(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
275 int idx)
276{
277}
278
279/* The mode_config.mutex will be held for this call */
280static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
281{
282 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
283
284 if (dcrtc->dpms != dpms) {
285 dcrtc->dpms = dpms;
286 armada_drm_crtc_update(dcrtc);
287 if (dpms_blanked(dpms))
288 armada_drm_vblank_off(dcrtc);
289 }
290}
291
292/*
293 * Prepare for a mode set. Turn off overlay to ensure that we don't end
294 * up with the overlay size being bigger than the active screen size.
295 * We rely upon X refreshing this state after the mode set has completed.
296 *
297 * The mode_config.mutex will be held for this call
298 */
299static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
300{
301 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
302 struct drm_plane *plane;
303
304 /*
305 * If we have an overlay plane associated with this CRTC, disable
306 * it before the modeset to avoid its coordinates being outside
307 * the new mode parameters. DRM doesn't provide help with this.
308 */
309 plane = dcrtc->plane;
310 if (plane) {
311 struct drm_framebuffer *fb = plane->fb;
312
313 plane->funcs->disable_plane(plane);
314 plane->fb = NULL;
315 plane->crtc = NULL;
316 drm_framebuffer_unreference(fb);
317 }
318}
319
320/* The mode_config.mutex will be held for this call */
321static void armada_drm_crtc_commit(struct drm_crtc *crtc)
322{
323 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
324
325 if (dcrtc->dpms != DRM_MODE_DPMS_ON) {
326 dcrtc->dpms = DRM_MODE_DPMS_ON;
327 armada_drm_crtc_update(dcrtc);
328 }
329}
330
331/* The mode_config.mutex will be held for this call */
332static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
333 const struct drm_display_mode *mode, struct drm_display_mode *adj)
334{
335 struct armada_private *priv = crtc->dev->dev_private;
336 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
337 int ret;
338
339 /* We can't do interlaced modes if we don't have the SPU_ADV_REG */
340 if (!priv->variant->has_spu_adv_reg &&
341 adj->flags & DRM_MODE_FLAG_INTERLACE)
342 return false;
343
344 /* Check whether the display mode is possible */
345 ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
346 if (ret)
347 return false;
348
349 return true;
350}
351
352void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
353{
354 struct armada_vbl_event *e, *n;
355 void __iomem *base = dcrtc->base;
356
357 if (stat & DMA_FF_UNDERFLOW)
358 DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
359 if (stat & GRA_FF_UNDERFLOW)
360 DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
361
362 if (stat & VSYNC_IRQ)
363 drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
364
365 spin_lock(&dcrtc->irq_lock);
366
367 list_for_each_entry_safe(e, n, &dcrtc->vbl_list, node) {
368 list_del_init(&e->node);
369 drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
370 e->fn(dcrtc, e->data);
371 }
372
373 if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
374 int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
375 uint32_t val;
376
377 writel_relaxed(dcrtc->v[i].spu_v_porch, base + LCD_SPU_V_PORCH);
378 writel_relaxed(dcrtc->v[i].spu_v_h_total,
379 base + LCD_SPUT_V_H_TOTAL);
380
381 val = readl_relaxed(base + LCD_SPU_ADV_REG);
382 val &= ~(ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF | ADV_VSYNCOFFEN);
383 val |= dcrtc->v[i].spu_adv_reg;
384 writel_relaxed(val, base + LCD_SPU_ADV_REG);
385 }
386
387 if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) {
388 writel_relaxed(dcrtc->cursor_hw_pos,
389 base + LCD_SPU_HWC_OVSA_HPXL_VLN);
390 writel_relaxed(dcrtc->cursor_hw_sz,
391 base + LCD_SPU_HWC_HPXL_VLN);
392 armada_updatel(CFG_HWC_ENA,
393 CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA,
394 base + LCD_SPU_DMA_CTRL0);
395 dcrtc->cursor_update = false;
396 armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
397 }
398
399 spin_unlock(&dcrtc->irq_lock);
400
401 if (stat & GRA_FRAME_IRQ) {
402 struct drm_device *dev = dcrtc->crtc.dev;
403
404 spin_lock(&dev->event_lock);
405 if (dcrtc->frame_work)
406 armada_drm_crtc_complete_frame_work(dcrtc);
407 spin_unlock(&dev->event_lock);
408
409 wake_up(&dcrtc->frame_wait);
410 }
411}
412
413/* These are locked by dev->vbl_lock */
414void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
415{
416 if (dcrtc->irq_ena & mask) {
417 dcrtc->irq_ena &= ~mask;
418 writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
419 }
420}
421
422void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
423{
424 if ((dcrtc->irq_ena & mask) != mask) {
425 dcrtc->irq_ena |= mask;
426 writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
427 if (readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR) & mask)
428 writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
429 }
430}
431
432static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
433{
434 struct drm_display_mode *adj = &dcrtc->crtc.mode;
435 uint32_t val = 0;
436
437 if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709)
438 val |= CFG_CSC_YUV_CCIR709;
439 if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO)
440 val |= CFG_CSC_RGB_STUDIO;
441
442 /*
443 * In auto mode, set the colorimetry, based upon the HDMI spec.
444 * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use
445 * ITU601. It may be more appropriate to set this depending on
446 * the source - but what if the graphic frame is YUV and the
447 * video frame is RGB?
448 */
449 if ((adj->hdisplay == 1280 && adj->vdisplay == 720 &&
450 !(adj->flags & DRM_MODE_FLAG_INTERLACE)) ||
451 (adj->hdisplay == 1920 && adj->vdisplay == 1080)) {
452 if (dcrtc->csc_yuv_mode == CSC_AUTO)
453 val |= CFG_CSC_YUV_CCIR709;
454 }
455
456 /*
457 * We assume we're connected to a TV-like device, so the YUV->RGB
458 * conversion should produce a limited range. We should set this
459 * depending on the connectors attached to this CRTC, and what
460 * kind of device they report being connected.
461 */
462 if (dcrtc->csc_rgb_mode == CSC_AUTO)
463 val |= CFG_CSC_RGB_STUDIO;
464
465 return val;
466}
467
468/* The mode_config.mutex will be held for this call */
469static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
470 struct drm_display_mode *mode, struct drm_display_mode *adj,
471 int x, int y, struct drm_framebuffer *old_fb)
472{
473 struct armada_private *priv = crtc->dev->dev_private;
474 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
475 struct armada_regs regs[17];
476 uint32_t lm, rm, tm, bm, val, sclk;
477 unsigned long flags;
478 unsigned i;
479 bool interlaced;
480
481 drm_framebuffer_reference(crtc->fb);
482
483 interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
484
485 i = armada_drm_crtc_calc_fb(dcrtc->crtc.fb, x, y, regs, interlaced);
486
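	/* The right/bottom margins are the front porches, the left/top
	 * margins the back porches.
	 */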
487 rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
488 lm = adj->crtc_htotal - adj->crtc_hsync_end;
489 bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
490 tm = adj->crtc_vtotal - adj->crtc_vsync_end;
491
492 DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n",
493 adj->crtc_hdisplay,
494 adj->crtc_hsync_start,
495 adj->crtc_hsync_end,
496 adj->crtc_htotal, lm, rm);
497 DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n",
498 adj->crtc_vdisplay,
499 adj->crtc_vsync_start,
500 adj->crtc_vsync_end,
501 adj->crtc_vtotal, tm, bm);
502
503 /* Wait for pending flips to complete */
504 wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
505
506 drm_vblank_pre_modeset(crtc->dev, dcrtc->num);
507
508 crtc->mode = *adj;
509
510 val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
511 if (val != dcrtc->dumb_ctrl) {
512 dcrtc->dumb_ctrl = val;
513 writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
514 }
515
516 /* Now compute the divider for real */
517 priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);
518
519 /* Ensure graphic fifo is enabled */
520 armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
521 armada_reg_queue_set(regs, i, sclk, LCD_CFG_SCLK_DIV);
522
523 if (interlaced ^ dcrtc->interlaced) {
524 if (adj->flags & DRM_MODE_FLAG_INTERLACE)
525 drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
526 else
527 drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
528 dcrtc->interlaced = interlaced;
529 }
530
531 spin_lock_irqsave(&dcrtc->irq_lock, flags);
532
533 /* Even interlaced/progressive frame */
534 dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
535 adj->crtc_htotal;
536 dcrtc->v[1].spu_v_porch = tm << 16 | bm;
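	/*
	 * Load the same sync offset into both the low and high vsync
	 * offset fields of the ADV register, and enable the offset.
	 */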
537 val = adj->crtc_hsync_start;
538 dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
539 priv->variant->spu_adv_reg;
540
541 if (interlaced) {
542 /* Odd interlaced frame */
543 dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
544 (1 << 16);
545 dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
546 val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
547 dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
548 priv->variant->spu_adv_reg;
549 } else {
550 dcrtc->v[0] = dcrtc->v[1];
551 }
552
553 val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
554
555 armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
556 armada_reg_queue_set(regs, i, val, LCD_SPU_GRA_HPXL_VLN);
557 armada_reg_queue_set(regs, i, val, LCD_SPU_GZM_HPXL_VLN);
558 armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
559 armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
560 armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
561 LCD_SPUT_V_H_TOTAL);
562
563 if (priv->variant->has_spu_adv_reg) {
564 armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
565 ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
566 ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
567 }
568
569 val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
570 val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt);
571 val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.fb)->mod);
572
573 if (drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt > CFG_420)
574 val |= CFG_PALETTE_ENA;
575
576 if (interlaced)
577 val |= CFG_GRA_FTOGGLE;
578
579 armada_reg_queue_mod(regs, i, val, CFG_GRAFORMAT |
580 CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
581 CFG_SWAPYU | CFG_YUV2RGB) |
582 CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
583 LCD_SPU_DMA_CTRL0);
584
585 val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
586 armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
587
588 val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc);
589 armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL);
590 armada_reg_queue_end(regs, i);
591
592 armada_drm_crtc_update_regs(dcrtc, regs);
593 spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
594
595 armada_drm_crtc_update(dcrtc);
596
597 drm_vblank_post_modeset(crtc->dev, dcrtc->num);
598 armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
599
600 return 0;
601}
602
603/* The mode_config.mutex will be held for this call */
604static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
605 struct drm_framebuffer *old_fb)
606{
607 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
608 struct armada_regs regs[4];
609 unsigned i;
610
611 i = armada_drm_crtc_calc_fb(crtc->fb, crtc->x, crtc->y, regs,
612 dcrtc->interlaced);
613 armada_reg_queue_end(regs, i);
614
615 /* Wait for pending flips to complete */
616 wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
617
618 /* Take a reference to the new fb as we're using it */
619 drm_framebuffer_reference(crtc->fb);
620
621 /* Update the base in the CRTC */
622 armada_drm_crtc_update_regs(dcrtc, regs);
623
624 /* Drop our previously held reference */
625 armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
626
627 return 0;
628}
629
630static void armada_drm_crtc_load_lut(struct drm_crtc *crtc)
631{
632}
633
634/* The mode_config.mutex will be held for this call */
635static void armada_drm_crtc_disable(struct drm_crtc *crtc)
636{
637 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
638
639 armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
640 armada_drm_crtc_finish_fb(dcrtc, crtc->fb, true);
641
642 /* Power down most RAMs and FIFOs */
643 writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
644 CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
645 CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
646}
647
648static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
649 .dpms = armada_drm_crtc_dpms,
650 .prepare = armada_drm_crtc_prepare,
651 .commit = armada_drm_crtc_commit,
652 .mode_fixup = armada_drm_crtc_mode_fixup,
653 .mode_set = armada_drm_crtc_mode_set,
654 .mode_set_base = armada_drm_crtc_mode_set_base,
655 .load_lut = armada_drm_crtc_load_lut,
656 .disable = armada_drm_crtc_disable,
657};
658
659static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
660 unsigned stride, unsigned width, unsigned height)
661{
662 uint32_t addr;
663 unsigned y;
664
665 addr = SRAM_HWC32_RAM1;
666 for (y = 0; y < height; y++) {
667 uint32_t *p = &pix[y * stride];
668 unsigned x;
669
670 for (x = 0; x < width; x++, p++) {
671 uint32_t val = *p;
672
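			/*
			 * The cursor SRAM appears to take ABGR ordering:
			 * swap the red and blue bytes of each ARGB pixel,
			 * leaving alpha and green in place.
			 */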
673 val = (val & 0xff00ff00) |
674 (val & 0x000000ff) << 16 |
675 (val & 0x00ff0000) >> 16;
676
677 writel_relaxed(val,
678 base + LCD_SPU_SRAM_WRDAT);
679 writel_relaxed(addr | SRAM_WRITE,
680 base + LCD_SPU_SRAM_CTRL);
681 addr += 1;
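			/*
			 * The SRAM appears to be banked in 256-word blocks:
			 * when the low byte wraps, skip to the next bank,
			 * and fall back to the RAM2 region once the first
			 * is exhausted.
			 */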
682 if ((addr & 0x00ff) == 0)
683 addr += 0xf00;
684 if ((addr & 0x30ff) == 0)
685 addr = SRAM_HWC32_RAM2;
686 }
687 }
688}
689
690static void armada_drm_crtc_cursor_tran(void __iomem *base)
691{
692 unsigned addr;
693
694 for (addr = 0; addr < 256; addr++) {
695 /* write the default value */
696 writel_relaxed(0x55555555, base + LCD_SPU_SRAM_WRDAT);
697 writel_relaxed(addr | SRAM_WRITE | SRAM_HWC32_TRAN,
698 base + LCD_SPU_SRAM_CTRL);
699 }
700}
701
702static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
703{
704 uint32_t xoff, xscr, w = dcrtc->cursor_w, s;
705 uint32_t yoff, yscr, h = dcrtc->cursor_h;
706 uint32_t para1;
707
708 /*
709 * Calculate the visible width and height of the cursor,
710 * screen position, and the position in the cursor bitmap.
711 */
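	/* For example, a 64-pixel-wide cursor at cursor_x = -16 is clipped
	 * to its rightmost 48 columns at screen x = 0: xoff = 16, xscr = 0,
	 * w = 48.
	 */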
712 if (dcrtc->cursor_x < 0) {
713 xoff = -dcrtc->cursor_x;
714 xscr = 0;
715 w -= min(xoff, w);
716 } else if (dcrtc->cursor_x + w > dcrtc->crtc.mode.hdisplay) {
717 xoff = 0;
718 xscr = dcrtc->cursor_x;
719 w = max_t(int, dcrtc->crtc.mode.hdisplay - dcrtc->cursor_x, 0);
720 } else {
721 xoff = 0;
722 xscr = dcrtc->cursor_x;
723 }
724
725 if (dcrtc->cursor_y < 0) {
726 yoff = -dcrtc->cursor_y;
727 yscr = 0;
728 h -= min(yoff, h);
729 } else if (dcrtc->cursor_y + h > dcrtc->crtc.mode.vdisplay) {
730 yoff = 0;
731 yscr = dcrtc->cursor_y;
732 h = max_t(int, dcrtc->crtc.mode.vdisplay - dcrtc->cursor_y, 0);
733 } else {
734 yoff = 0;
735 yscr = dcrtc->cursor_y;
736 }
737
 738	/* In interlaced modes each field carries half the lines: halve the vertical size and position, and double the source stride */
739 s = dcrtc->cursor_w;
740 if (dcrtc->interlaced) {
741 s *= 2;
742 yscr /= 2;
743 h /= 2;
744 }
745
746 if (!dcrtc->cursor_obj || !h || !w) {
747 spin_lock_irq(&dcrtc->irq_lock);
748 armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
749 dcrtc->cursor_update = false;
750 armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
751 spin_unlock_irq(&dcrtc->irq_lock);
752 return 0;
753 }
754
755 para1 = readl_relaxed(dcrtc->base + LCD_SPU_SRAM_PARA1);
756 armada_updatel(CFG_CSB_256x32, CFG_CSB_256x32 | CFG_PDWN256x32,
757 dcrtc->base + LCD_SPU_SRAM_PARA1);
758
759 /*
760 * Initialize the transparency if the SRAM was powered down.
761 * We must also reload the cursor data as well.
762 */
763 if (!(para1 & CFG_CSB_256x32)) {
764 armada_drm_crtc_cursor_tran(dcrtc->base);
765 reload = true;
766 }
767
768 if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
769 spin_lock_irq(&dcrtc->irq_lock);
770 armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
771 dcrtc->cursor_update = false;
772 armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
773 spin_unlock_irq(&dcrtc->irq_lock);
774 reload = true;
775 }
776 if (reload) {
777 struct armada_gem_object *obj = dcrtc->cursor_obj;
778 uint32_t *pix;
779 /* Set the top-left corner of the cursor image */
780 pix = obj->addr;
781 pix += yoff * s + xoff;
782 armada_load_cursor_argb(dcrtc->base, pix, s, w, h);
783 }
784
785 /* Reload the cursor position, size and enable in the IRQ handler */
786 spin_lock_irq(&dcrtc->irq_lock);
787 dcrtc->cursor_hw_pos = yscr << 16 | xscr;
788 dcrtc->cursor_hw_sz = h << 16 | w;
789 dcrtc->cursor_update = true;
790 armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
791 spin_unlock_irq(&dcrtc->irq_lock);
792
793 return 0;
794}
795
796static void cursor_update(void *data)
797{
798 armada_drm_crtc_cursor_update(data, true);
799}
800
801static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
802 struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h)
803{
804 struct drm_device *dev = crtc->dev;
805 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
806 struct armada_private *priv = crtc->dev->dev_private;
807 struct armada_gem_object *obj = NULL;
808 int ret;
809
810 /* If no cursor support, replicate drm's return value */
811 if (!priv->variant->has_spu_adv_reg)
812 return -ENXIO;
813
814 if (handle && w > 0 && h > 0) {
815 /* maximum size is 64x32 or 32x64 */
816 if (w > 64 || h > 64 || (w > 32 && h > 32))
817 return -ENOMEM;
818
819 obj = armada_gem_object_lookup(dev, file, handle);
820 if (!obj)
821 return -ENOENT;
822
823 /* Must be a kernel-mapped object */
824 if (!obj->addr) {
825 drm_gem_object_unreference_unlocked(&obj->obj);
826 return -EINVAL;
827 }
828
829 if (obj->obj.size < w * h * 4) {
830 DRM_ERROR("buffer is too small\n");
831 drm_gem_object_unreference_unlocked(&obj->obj);
832 return -ENOMEM;
833 }
834 }
835
836 mutex_lock(&dev->struct_mutex);
837 if (dcrtc->cursor_obj) {
838 dcrtc->cursor_obj->update = NULL;
839 dcrtc->cursor_obj->update_data = NULL;
840 drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
841 }
842 dcrtc->cursor_obj = obj;
843 dcrtc->cursor_w = w;
844 dcrtc->cursor_h = h;
845 ret = armada_drm_crtc_cursor_update(dcrtc, true);
846 if (obj) {
847 obj->update_data = dcrtc;
848 obj->update = cursor_update;
849 }
850 mutex_unlock(&dev->struct_mutex);
851
852 return ret;
853}
854
855static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
856{
857 struct drm_device *dev = crtc->dev;
858 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
859 struct armada_private *priv = crtc->dev->dev_private;
860 int ret;
861
862 /* If no cursor support, replicate drm's return value */
863 if (!priv->variant->has_spu_adv_reg)
864 return -EFAULT;
865
866 mutex_lock(&dev->struct_mutex);
867 dcrtc->cursor_x = x;
868 dcrtc->cursor_y = y;
869 ret = armada_drm_crtc_cursor_update(dcrtc, false);
870 mutex_unlock(&dev->struct_mutex);
871
872 return ret;
873}
874
875static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
876{
877 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
878 struct armada_private *priv = crtc->dev->dev_private;
879
880 if (dcrtc->cursor_obj)
881 drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
882
883 priv->dcrtc[dcrtc->num] = NULL;
884 drm_crtc_cleanup(&dcrtc->crtc);
885
886 if (!IS_ERR(dcrtc->clk))
887 clk_disable_unprepare(dcrtc->clk);
888
889 kfree(dcrtc);
890}
891
892/*
893 * The mode_config lock is held here, to prevent races between this
894 * and a mode_set.
895 */
896static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
897 struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
898{
899 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
900 struct armada_frame_work *work;
901 struct drm_device *dev = crtc->dev;
902 unsigned long flags;
903 unsigned i;
904 int ret;
905
906 /* We don't support changing the pixel format */
907 if (fb->pixel_format != crtc->fb->pixel_format)
908 return -EINVAL;
909
910 work = kmalloc(sizeof(*work), GFP_KERNEL);
911 if (!work)
912 return -ENOMEM;
913
914 work->event = event;
915 work->old_fb = dcrtc->crtc.fb;
916
917 i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
918 dcrtc->interlaced);
919 armada_reg_queue_end(work->regs, i);
920
921 /*
922 * Hold the old framebuffer for the work - DRM appears to drop our
923 * reference to the old framebuffer in drm_mode_page_flip_ioctl().
924 */
925 drm_framebuffer_reference(work->old_fb);
926
927 ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
928 if (ret) {
929 /*
930 * Undo our reference above; DRM does not drop the reference
931 * to this object on error, so that's okay.
932 */
933 drm_framebuffer_unreference(work->old_fb);
934 kfree(work);
935 return ret;
936 }
937
938 /*
939 * Don't take a reference on the new framebuffer;
940 * drm_mode_page_flip_ioctl() has already grabbed a reference and
941 * will _not_ drop that reference on successful return from this
942 * function. Simply mark this new framebuffer as the current one.
943 */
944 dcrtc->crtc.fb = fb;
945
946 /*
947 * Finally, if the display is blanked, we won't receive an
948 * interrupt, so complete it now.
949 */
950 if (dpms_blanked(dcrtc->dpms)) {
951 spin_lock_irqsave(&dev->event_lock, flags);
952 if (dcrtc->frame_work)
953 armada_drm_crtc_complete_frame_work(dcrtc);
954 spin_unlock_irqrestore(&dev->event_lock, flags);
955 }
956
957 return 0;
958}
959
960static int
961armada_drm_crtc_set_property(struct drm_crtc *crtc,
962 struct drm_property *property, uint64_t val)
963{
964 struct armada_private *priv = crtc->dev->dev_private;
965 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
966 bool update_csc = false;
967
968 if (property == priv->csc_yuv_prop) {
969 dcrtc->csc_yuv_mode = val;
970 update_csc = true;
971 } else if (property == priv->csc_rgb_prop) {
972 dcrtc->csc_rgb_mode = val;
973 update_csc = true;
974 }
975
976 if (update_csc) {
977 uint32_t val;
978
979 val = dcrtc->spu_iopad_ctrl |
980 armada_drm_crtc_calculate_csc(dcrtc);
981 writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL);
982 }
983
984 return 0;
985}
986
987static struct drm_crtc_funcs armada_crtc_funcs = {
988 .cursor_set = armada_drm_crtc_cursor_set,
989 .cursor_move = armada_drm_crtc_cursor_move,
990 .destroy = armada_drm_crtc_destroy,
991 .set_config = drm_crtc_helper_set_config,
992 .page_flip = armada_drm_crtc_page_flip,
993 .set_property = armada_drm_crtc_set_property,
994};
995
996static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
997 { CSC_AUTO, "Auto" },
998 { CSC_YUV_CCIR601, "CCIR601" },
999 { CSC_YUV_CCIR709, "CCIR709" },
1000};
1001
1002static struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
1003 { CSC_AUTO, "Auto" },
1004 { CSC_RGB_COMPUTER, "Computer system" },
1005 { CSC_RGB_STUDIO, "Studio" },
1006};
1007
1008static int armada_drm_crtc_create_properties(struct drm_device *dev)
1009{
1010 struct armada_private *priv = dev->dev_private;
1011
1012 if (priv->csc_yuv_prop)
1013 return 0;
1014
1015 priv->csc_yuv_prop = drm_property_create_enum(dev, 0,
1016 "CSC_YUV", armada_drm_csc_yuv_enum_list,
1017 ARRAY_SIZE(armada_drm_csc_yuv_enum_list));
1018 priv->csc_rgb_prop = drm_property_create_enum(dev, 0,
1019 "CSC_RGB", armada_drm_csc_rgb_enum_list,
1020 ARRAY_SIZE(armada_drm_csc_rgb_enum_list));
1021
1022 if (!priv->csc_yuv_prop || !priv->csc_rgb_prop)
1023 return -ENOMEM;
1024
1025 return 0;
1026}
1027
1028int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
1029 struct resource *res)
1030{
1031 struct armada_private *priv = dev->dev_private;
1032 struct armada_crtc *dcrtc;
1033 void __iomem *base;
1034 int ret;
1035
1036 ret = armada_drm_crtc_create_properties(dev);
1037 if (ret)
1038 return ret;
1039
1040 base = devm_request_and_ioremap(dev->dev, res);
1041 if (!base) {
1042 DRM_ERROR("failed to ioremap register\n");
1043 return -ENOMEM;
1044 }
1045
1046 dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
1047 if (!dcrtc) {
1048 DRM_ERROR("failed to allocate Armada crtc\n");
1049 return -ENOMEM;
1050 }
1051
1052 dcrtc->base = base;
1053 dcrtc->num = num;
1054 dcrtc->clk = ERR_PTR(-EINVAL);
1055 dcrtc->csc_yuv_mode = CSC_AUTO;
1056 dcrtc->csc_rgb_mode = CSC_AUTO;
1057 dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
1058 dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
1059 spin_lock_init(&dcrtc->irq_lock);
1060 dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR;
1061 INIT_LIST_HEAD(&dcrtc->vbl_list);
1062 init_waitqueue_head(&dcrtc->frame_wait);
1063
1064 /* Initialize some registers which we don't otherwise set */
1065 writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV);
1066 writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_BLANKCOLOR);
1067 writel_relaxed(dcrtc->spu_iopad_ctrl,
1068 dcrtc->base + LCD_SPU_IOPAD_CONTROL);
1069 writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_SRAM_PARA0);
1070 writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
1071 CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
1072 CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
1073 writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
1074 writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
1075
1076 if (priv->variant->crtc_init) {
1077 ret = priv->variant->crtc_init(dcrtc);
1078 if (ret) {
1079 kfree(dcrtc);
1080 return ret;
1081 }
1082 }
1083
1084 /* Ensure AXI pipeline is enabled */
1085 armada_updatel(CFG_ARBFAST_ENA, 0, dcrtc->base + LCD_SPU_DMA_CTRL0);
1086
1087 priv->dcrtc[dcrtc->num] = dcrtc;
1088
1089 drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
1090 drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
1091
1092 drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
1093 dcrtc->csc_yuv_mode);
1094 drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
1095 dcrtc->csc_rgb_mode);
1096
1097 return armada_overlay_plane_create(dev, 1 << dcrtc->num);
1098}
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
new file mode 100644
index 000000000000..9c10a07e7492
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -0,0 +1,83 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef ARMADA_CRTC_H
9#define ARMADA_CRTC_H
10
11struct armada_gem_object;
12
13struct armada_regs {
14 uint32_t offset;
15 uint32_t mask;
16 uint32_t val;
17};
18
19#define armada_reg_queue_mod(_r, _i, _v, _m, _o) \
20 do { \
21 struct armada_regs *__reg = _r; \
22 __reg[_i].offset = _o; \
23 __reg[_i].mask = ~(_m); \
24 __reg[_i].val = _v; \
25 _i++; \
26 } while (0)
27
28#define armada_reg_queue_set(_r, _i, _v, _o) \
29 armada_reg_queue_mod(_r, _i, _v, ~0, _o)
30
31#define armada_reg_queue_end(_r, _i) \
32 armada_reg_queue_mod(_r, _i, 0, 0, ~0)
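
/*
 * Illustrative use (names as used elsewhere in this driver): build a
 * terminated list of register updates for armada_drm_crtc_update_regs().
 *
 *	struct armada_regs regs[3];
 *	unsigned i = 0;
 *
 *	armada_reg_queue_set(regs, i, val, LCD_SPU_V_PORCH);
 *	armada_reg_queue_mod(regs, i, CFG_GRA_ENA, CFG_GRA_ENA,
 *			     LCD_SPU_DMA_CTRL0);
 *	armada_reg_queue_end(regs, i);
 */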
33
34struct armada_frame_work;
35
36struct armada_crtc {
37 struct drm_crtc crtc;
38 unsigned num;
39 void __iomem *base;
40 struct clk *clk;
41 struct {
42 uint32_t spu_v_h_total;
43 uint32_t spu_v_porch;
44 uint32_t spu_adv_reg;
45 } v[2];
46 bool interlaced;
47 bool cursor_update;
48 uint8_t csc_yuv_mode;
49 uint8_t csc_rgb_mode;
50
51 struct drm_plane *plane;
52
53 struct armada_gem_object *cursor_obj;
54 int cursor_x;
55 int cursor_y;
56 uint32_t cursor_hw_pos;
57 uint32_t cursor_hw_sz;
58 uint32_t cursor_w;
59 uint32_t cursor_h;
60
61 int dpms;
62 uint32_t cfg_dumb_ctrl;
63 uint32_t dumb_ctrl;
64 uint32_t spu_iopad_ctrl;
65
66 wait_queue_head_t frame_wait;
67 struct armada_frame_work *frame_work;
68
69 spinlock_t irq_lock;
70 uint32_t irq_ena;
71 struct list_head vbl_list;
72};
73#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
74
75int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
76void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
77void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
78void armada_drm_crtc_irq(struct armada_crtc *, u32);
79void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
80void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
81void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
82
83#endif
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
new file mode 100644
index 000000000000..471e45627f1e
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -0,0 +1,177 @@
1/*
2 * Copyright (C) 2012 Russell King
3 * Rewritten from the dovefb driver, and Armada510 manuals.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/ctype.h>
10#include <linux/debugfs.h>
11#include <linux/module.h>
12#include <linux/seq_file.h>
13#include <drm/drmP.h>
14#include "armada_crtc.h"
15#include "armada_drm.h"
16
17static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
18{
19 struct drm_info_node *node = m->private;
20 struct drm_device *dev = node->minor->dev;
21 struct armada_private *priv = dev->dev_private;
22 int ret;
23
24 mutex_lock(&dev->struct_mutex);
25 ret = drm_mm_dump_table(m, &priv->linear);
26 mutex_unlock(&dev->struct_mutex);
27
28 return ret;
29}
30
31static int armada_debugfs_reg_show(struct seq_file *m, void *data)
32{
33 struct drm_device *dev = m->private;
34 struct armada_private *priv = dev->dev_private;
35 int n, i;
36
37 if (priv) {
38 for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
39 struct armada_crtc *dcrtc = priv->dcrtc[n];
40 if (!dcrtc)
41 continue;
42
43 for (i = 0x84; i <= 0x1c4; i += 4) {
44 uint32_t v = readl_relaxed(dcrtc->base + i);
45 seq_printf(m, "%u: 0x%04x: 0x%08x\n", n, i, v);
46 }
47 }
48 }
49
50 return 0;
51}
52
53static int armada_debugfs_reg_r_open(struct inode *inode, struct file *file)
54{
55 return single_open(file, armada_debugfs_reg_show, inode->i_private);
56}
57
58static const struct file_operations fops_reg_r = {
59 .owner = THIS_MODULE,
60 .open = armada_debugfs_reg_r_open,
61 .read = seq_read,
62 .llseek = seq_lseek,
63 .release = single_release,
64};
65
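/*
 * Parse "<reg> <val>" (both hex) and write val to the LCD0 register
 * at offset reg; only offsets 0x84-0x1c4 are accepted.
 */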
  66static ssize_t armada_debugfs_write(struct file *file, const char __user *ptr,
67 size_t len, loff_t *off)
68{
69 struct drm_device *dev = file->private_data;
70 struct armada_private *priv = dev->dev_private;
71 struct armada_crtc *dcrtc = priv->dcrtc[0];
72 char buf[32], *p;
73 uint32_t reg, val;
74 int ret;
75
76 if (*off != 0)
77 return 0;
78
79 if (len > sizeof(buf) - 1)
80 len = sizeof(buf) - 1;
81
82 ret = strncpy_from_user(buf, ptr, len);
83 if (ret < 0)
84 return ret;
85 buf[len] = '\0';
86
87 reg = simple_strtoul(buf, &p, 16);
88 if (!isspace(*p))
89 return -EINVAL;
90 val = simple_strtoul(p + 1, NULL, 16);
91
92 if (reg >= 0x84 && reg <= 0x1c4)
93 writel(val, dcrtc->base + reg);
94
95 return len;
96}
97
98static const struct file_operations fops_reg_w = {
99 .owner = THIS_MODULE,
100 .open = simple_open,
101 .write = armada_debugfs_write,
102 .llseek = noop_llseek,
103};
104
105static struct drm_info_list armada_debugfs_list[] = {
106 { "gem_linear", armada_debugfs_gem_linear_show, 0 },
107};
108#define ARMADA_DEBUGFS_ENTRIES ARRAY_SIZE(armada_debugfs_list)
109
110static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
111 const void *key)
112{
113 struct drm_info_node *node;
114
115 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
116 if (node == NULL) {
117 debugfs_remove(ent);
118 return -ENOMEM;
119 }
120
121 node->minor = minor;
122 node->dent = ent;
123 node->info_ent = (void *) key;
124
125 mutex_lock(&minor->debugfs_lock);
126 list_add(&node->list, &minor->debugfs_list);
127 mutex_unlock(&minor->debugfs_lock);
128
129 return 0;
130}
131
132static int armada_debugfs_create(struct dentry *root, struct drm_minor *minor,
133 const char *name, umode_t mode, const struct file_operations *fops)
134{
135 struct dentry *de;
136
137 de = debugfs_create_file(name, mode, root, minor->dev, fops);
138
139 return drm_add_fake_info_node(minor, de, fops);
140}
141
142int armada_drm_debugfs_init(struct drm_minor *minor)
143{
144 int ret;
145
146 ret = drm_debugfs_create_files(armada_debugfs_list,
147 ARMADA_DEBUGFS_ENTRIES,
148 minor->debugfs_root, minor);
149 if (ret)
150 return ret;
151
152 ret = armada_debugfs_create(minor->debugfs_root, minor,
153 "reg", S_IFREG | S_IRUSR, &fops_reg_r);
154 if (ret)
155 goto err_1;
156
157 ret = armada_debugfs_create(minor->debugfs_root, minor,
158 "reg_wr", S_IFREG | S_IWUSR, &fops_reg_w);
159 if (ret)
160 goto err_2;
161 return ret;
162
163 err_2:
164 drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
165 err_1:
166 drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
167 minor);
168 return ret;
169}
170
171void armada_drm_debugfs_cleanup(struct drm_minor *minor)
172{
173 drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_w, 1, minor);
174 drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
175 drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
176 minor);
177}
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
new file mode 100644
index 000000000000..eef09ec9a5ff
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef ARMADA_DRM_H
9#define ARMADA_DRM_H
10
11#include <linux/kfifo.h>
12#include <linux/io.h>
13#include <linux/workqueue.h>
14#include <drm/drmP.h>
15
16struct armada_crtc;
17struct armada_gem_object;
18struct clk;
19struct drm_fb_helper;
20
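/*
 * Read-modify-write helper: clear @mask, set @val, and skip the write
 * entirely when the register value would be unchanged.
 */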
21static inline void
22armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr)
23{
24 uint32_t ov, v;
25
26 ov = v = readl_relaxed(ptr);
27 v = (v & ~mask) | val;
28 if (ov != v)
29 writel_relaxed(v, ptr);
30}
31
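/*
 * For example, a 1366-pixel-wide line at 32bpp is 5464 bytes, aligned
 * up to 5504; at 4bpp two pixels share a byte, hence width / 2.
 */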
32static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp)
33{
34 uint32_t pitch = bpp != 4 ? width * ((bpp + 7) / 8) : width / 2;
35
36 /* 88AP510 spec recommends pitch be a multiple of 128 */
37 return ALIGN(pitch, 128);
38}
39
40struct armada_vbl_event {
41 struct list_head node;
42 void *data;
43 void (*fn)(struct armada_crtc *, void *);
44};
45void armada_drm_vbl_event_add(struct armada_crtc *,
46 struct armada_vbl_event *);
47void armada_drm_vbl_event_remove(struct armada_crtc *,
48 struct armada_vbl_event *);
49void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
50 struct armada_vbl_event *);
51#define armada_drm_vbl_event_init(_e, _f, _d) do { \
52 struct armada_vbl_event *__e = _e; \
53 INIT_LIST_HEAD(&__e->node); \
54 __e->data = _d; \
55 __e->fn = _f; \
56} while (0)
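
/*
 * Sketch of typical use, with a hypothetical callback: initialise the
 * event once, then queue it; the callback runs from the CRTC interrupt
 * handler with irq_lock held.
 *
 *	static void my_vbl_handler(struct armada_crtc *dcrtc, void *data);
 *
 *	armada_drm_vbl_event_init(&evt, my_vbl_handler, my_data);
 *	armada_drm_vbl_event_add(dcrtc, &evt);
 */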
57
58
59struct armada_private;
60
61struct armada_variant {
62 bool has_spu_adv_reg;
63 uint32_t spu_adv_reg;
64 int (*init)(struct armada_private *, struct device *);
65 int (*crtc_init)(struct armada_crtc *);
66 int (*crtc_compute_clock)(struct armada_crtc *,
67 const struct drm_display_mode *,
68 uint32_t *);
69};
70
71/* Variant ops */
72extern const struct armada_variant armada510_ops;
73
74struct armada_private {
75 const struct armada_variant *variant;
76 struct work_struct fb_unref_work;
77 DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
78 struct drm_fb_helper *fbdev;
79 struct armada_crtc *dcrtc[2];
80 struct drm_mm linear;
81 struct clk *extclk[2];
82 struct drm_property *csc_yuv_prop;
83 struct drm_property *csc_rgb_prop;
84 struct drm_property *colorkey_prop;
85 struct drm_property *colorkey_min_prop;
86 struct drm_property *colorkey_max_prop;
87 struct drm_property *colorkey_val_prop;
88 struct drm_property *colorkey_alpha_prop;
89 struct drm_property *colorkey_mode_prop;
90 struct drm_property *brightness_prop;
91 struct drm_property *contrast_prop;
92 struct drm_property *saturation_prop;
93#ifdef CONFIG_DEBUG_FS
94 struct dentry *de;
95#endif
96};
97
98void __armada_drm_queue_unref_work(struct drm_device *,
99 struct drm_framebuffer *);
100void armada_drm_queue_unref_work(struct drm_device *,
101 struct drm_framebuffer *);
102
103extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
104
105int armada_fbdev_init(struct drm_device *);
106void armada_fbdev_fini(struct drm_device *);
107
108int armada_overlay_plane_create(struct drm_device *, unsigned long);
109
110int armada_drm_debugfs_init(struct drm_minor *);
111void armada_drm_debugfs_cleanup(struct drm_minor *);
112
113#endif
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
new file mode 100644
index 000000000000..4f2b28354915
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -0,0 +1,421 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/clk.h>
9#include <linux/module.h>
10#include <drm/drmP.h>
11#include <drm/drm_crtc_helper.h>
12#include "armada_crtc.h"
13#include "armada_drm.h"
14#include "armada_gem.h"
15#include "armada_hw.h"
16#include <drm/armada_drm.h>
17#include "armada_ioctlP.h"
18
19#ifdef CONFIG_DRM_ARMADA_TDA1998X
20#include <drm/i2c/tda998x.h>
21#include "armada_slave.h"
22
23static struct tda998x_encoder_params params = {
24 /* With 0x24, there is no translation between vp_out and int_vp
25 FB LCD out Pins VIP Int Vp
26 R:23:16 R:7:0 VPC7:0 7:0 7:0[R]
27 G:15:8 G:15:8 VPB7:0 23:16 23:16[G]
28 B:7:0 B:23:16 VPA7:0 15:8 15:8[B]
29 */
30 .swap_a = 2,
31 .swap_b = 3,
32 .swap_c = 4,
33 .swap_d = 5,
34 .swap_e = 0,
35 .swap_f = 1,
36 .audio_cfg = BIT(2),
37 .audio_frame[1] = 1,
38 .audio_format = AFMT_SPDIF,
39 .audio_sample_rate = 44100,
40};
41
42static const struct armada_drm_slave_config tda19988_config = {
43 .i2c_adapter_id = 0,
44 .crtcs = 1 << 0, /* Only LCD0 at the moment */
45 .polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
46 .interlace_allowed = true,
47 .info = {
48 .type = "tda998x",
49 .addr = 0x70,
50 .platform_data = &params,
51 },
52};
53#endif
54
55static void armada_drm_unref_work(struct work_struct *work)
56{
57 struct armada_private *priv =
58 container_of(work, struct armada_private, fb_unref_work);
59 struct drm_framebuffer *fb;
60
61 while (kfifo_get(&priv->fb_unref, &fb))
62 drm_framebuffer_unreference(fb);
63}
64
65/* Must be called with dev->event_lock held */
66void __armada_drm_queue_unref_work(struct drm_device *dev,
67 struct drm_framebuffer *fb)
68{
69 struct armada_private *priv = dev->dev_private;
70
71 /*
72 * Yes, we really must jump through these hoops just to store a
73 * _pointer_ to something into the kfifo. This is utterly insane
  74 * and idiotic, because the kfifo requires the _data_ pointed to by
  75 * the pointer to be const, not the pointer itself.  Not only that, but
76 * you have to pass a pointer _to_ the pointer you want stored.
77 */
78 const struct drm_framebuffer *silly_api_alert = fb;
79 WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
80 schedule_work(&priv->fb_unref_work);
81}
82
83void armada_drm_queue_unref_work(struct drm_device *dev,
84 struct drm_framebuffer *fb)
85{
86 unsigned long flags;
87
88 spin_lock_irqsave(&dev->event_lock, flags);
89 __armada_drm_queue_unref_work(dev, fb);
90 spin_unlock_irqrestore(&dev->event_lock, flags);
91}
92
93static int armada_drm_load(struct drm_device *dev, unsigned long flags)
94{
95 const struct platform_device_id *id;
96 struct armada_private *priv;
97 struct resource *res[ARRAY_SIZE(priv->dcrtc)];
98 struct resource *mem = NULL;
99 int ret, n, i;
100
101 memset(res, 0, sizeof(res));
102
103 for (n = i = 0; ; n++) {
104 struct resource *r = platform_get_resource(dev->platformdev,
105 IORESOURCE_MEM, n);
106 if (!r)
107 break;
108
109 /* Resources above 64K are graphics memory */
110 if (resource_size(r) > SZ_64K)
111 mem = r;
112 else if (i < ARRAY_SIZE(priv->dcrtc))
113 res[i++] = r;
114 else
115 return -EINVAL;
116 }
117
118 if (!res[0] || !mem)
119 return -ENXIO;
120
121 if (!devm_request_mem_region(dev->dev, mem->start,
122 resource_size(mem), "armada-drm"))
123 return -EBUSY;
124
125 priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
126 if (!priv) {
127 DRM_ERROR("failed to allocate private\n");
128 return -ENOMEM;
129 }
130
131 dev->dev_private = priv;
132
 133	/* Get the implementation-specific driver data. */
134 id = platform_get_device_id(dev->platformdev);
135 if (!id)
136 return -ENXIO;
137
138 priv->variant = (struct armada_variant *)id->driver_data;
139
140 ret = priv->variant->init(priv, dev->dev);
141 if (ret)
142 return ret;
143
144 INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
145 INIT_KFIFO(priv->fb_unref);
146
147 /* Mode setting support */
148 drm_mode_config_init(dev);
149 dev->mode_config.min_width = 320;
150 dev->mode_config.min_height = 200;
151
152 /*
153 * With vscale enabled, the maximum width is 1920 due to the
 154	 * scaler's 1920 x 3 line RAM.
155 */
156 dev->mode_config.max_width = 1920;
157 dev->mode_config.max_height = 2048;
158
159 dev->mode_config.preferred_depth = 24;
160 dev->mode_config.funcs = &armada_drm_mode_config_funcs;
161 drm_mm_init(&priv->linear, mem->start, resource_size(mem));
162
163 /* Create all LCD controllers */
164 for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
165 if (!res[n])
166 break;
167
168 ret = armada_drm_crtc_create(dev, n, res[n]);
169 if (ret)
170 goto err_kms;
171 }
172
173#ifdef CONFIG_DRM_ARMADA_TDA1998X
174 ret = armada_drm_connector_slave_create(dev, &tda19988_config);
175 if (ret)
176 goto err_kms;
177#endif
178
179 ret = drm_vblank_init(dev, n);
180 if (ret)
181 goto err_kms;
182
183 ret = drm_irq_install(dev);
184 if (ret)
185 goto err_kms;
186
187 dev->vblank_disable_allowed = 1;
188
189 ret = armada_fbdev_init(dev);
190 if (ret)
191 goto err_irq;
192
193 drm_kms_helper_poll_init(dev);
194
195 return 0;
196
197 err_irq:
198 drm_irq_uninstall(dev);
199 err_kms:
200 drm_mode_config_cleanup(dev);
201 drm_mm_takedown(&priv->linear);
202 flush_work(&priv->fb_unref_work);
203
204 return ret;
205}
206
207static int armada_drm_unload(struct drm_device *dev)
208{
209 struct armada_private *priv = dev->dev_private;
210
211 drm_kms_helper_poll_fini(dev);
212 armada_fbdev_fini(dev);
213 drm_irq_uninstall(dev);
214 drm_mode_config_cleanup(dev);
215 drm_mm_takedown(&priv->linear);
216 flush_work(&priv->fb_unref_work);
217 dev->dev_private = NULL;
218
219 return 0;
220}
221
222void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
223 struct armada_vbl_event *evt)
224{
225 unsigned long flags;
226
227 spin_lock_irqsave(&dcrtc->irq_lock, flags);
228 if (list_empty(&evt->node)) {
229 list_add_tail(&evt->node, &dcrtc->vbl_list);
230
231 drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
232 }
233 spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
234}
235
236void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
237 struct armada_vbl_event *evt)
238{
239 if (!list_empty(&evt->node)) {
240 list_del_init(&evt->node);
241 drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
242 }
243}
244
245void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *dcrtc,
246 struct armada_vbl_event *evt)
247{
248 unsigned long flags;
249
250 spin_lock_irqsave(&dcrtc->irq_lock, flags);
251 armada_drm_vbl_event_remove(dcrtc, evt);
252 spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
253}
254
255/* These are called under the vbl_lock. */
256static int armada_drm_enable_vblank(struct drm_device *dev, int crtc)
257{
258 struct armada_private *priv = dev->dev_private;
259 armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
260 return 0;
261}
262
263static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
264{
265 struct armada_private *priv = dev->dev_private;
266 armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
267}
268
269static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
270{
271 struct drm_device *dev = arg;
272 struct armada_private *priv = dev->dev_private;
273 struct armada_crtc *dcrtc = priv->dcrtc[0];
274 uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
275 irqreturn_t handled = IRQ_NONE;
276
277 /*
 278	 * This is ridiculous - rather than writing bits to clear, we
279 * have to set the actual status register value. This is racy.
280 */
281 writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
282
283 /* Mask out those interrupts we haven't enabled */
284 v = stat & dcrtc->irq_ena;
285
286 if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
287 armada_drm_crtc_irq(dcrtc, stat);
288 handled = IRQ_HANDLED;
289 }
290
291 return handled;
292}
293
294static int armada_drm_irq_postinstall(struct drm_device *dev)
295{
296 struct armada_private *priv = dev->dev_private;
297 struct armada_crtc *dcrtc = priv->dcrtc[0];
298
299 spin_lock_irq(&dev->vbl_lock);
300 writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
301 writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
302 spin_unlock_irq(&dev->vbl_lock);
303
304 return 0;
305}
306
307static void armada_drm_irq_uninstall(struct drm_device *dev)
308{
309 struct armada_private *priv = dev->dev_private;
310 struct armada_crtc *dcrtc = priv->dcrtc[0];
311
312 writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
313}
314
315static struct drm_ioctl_desc armada_ioctls[] = {
316 DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
317 DRM_UNLOCKED),
318 DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl,
319 DRM_UNLOCKED),
320 DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
321 DRM_UNLOCKED),
322};
323
324static const struct file_operations armada_drm_fops = {
325 .owner = THIS_MODULE,
326 .llseek = no_llseek,
327 .read = drm_read,
328 .poll = drm_poll,
329 .unlocked_ioctl = drm_ioctl,
330 .mmap = drm_gem_mmap,
331 .open = drm_open,
332 .release = drm_release,
333};
334
335static struct drm_driver armada_drm_driver = {
336 .load = armada_drm_load,
337 .open = NULL,
338 .preclose = NULL,
339 .postclose = NULL,
340 .lastclose = NULL,
341 .unload = armada_drm_unload,
342 .get_vblank_counter = drm_vblank_count,
343 .enable_vblank = armada_drm_enable_vblank,
344 .disable_vblank = armada_drm_disable_vblank,
345 .irq_handler = armada_drm_irq_handler,
346 .irq_postinstall = armada_drm_irq_postinstall,
347 .irq_uninstall = armada_drm_irq_uninstall,
348#ifdef CONFIG_DEBUG_FS
349 .debugfs_init = armada_drm_debugfs_init,
350 .debugfs_cleanup = armada_drm_debugfs_cleanup,
351#endif
352 .gem_free_object = armada_gem_free_object,
353 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
354 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
355 .gem_prime_export = armada_gem_prime_export,
356 .gem_prime_import = armada_gem_prime_import,
357 .dumb_create = armada_gem_dumb_create,
358 .dumb_map_offset = armada_gem_dumb_map_offset,
359 .dumb_destroy = armada_gem_dumb_destroy,
360 .gem_vm_ops = &armada_gem_vm_ops,
361 .major = 1,
362 .minor = 0,
363 .name = "armada-drm",
364 .desc = "Armada SoC DRM",
365 .date = "20120730",
366 .driver_features = DRIVER_GEM | DRIVER_MODESET |
367 DRIVER_HAVE_IRQ | DRIVER_PRIME,
368 .ioctls = armada_ioctls,
369 .fops = &armada_drm_fops,
370};
371
372static int armada_drm_probe(struct platform_device *pdev)
373{
374 return drm_platform_init(&armada_drm_driver, pdev);
375}
376
377static int armada_drm_remove(struct platform_device *pdev)
378{
379 drm_platform_exit(&armada_drm_driver, pdev);
380 return 0;
381}
382
383static const struct platform_device_id armada_drm_platform_ids[] = {
384 {
385 .name = "armada-drm",
386 .driver_data = (unsigned long)&armada510_ops,
387 }, {
388 .name = "armada-510-drm",
389 .driver_data = (unsigned long)&armada510_ops,
390 },
391 { },
392};
393MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
394
395static struct platform_driver armada_drm_platform_driver = {
396 .probe = armada_drm_probe,
397 .remove = armada_drm_remove,
398 .driver = {
399 .name = "armada-drm",
400 .owner = THIS_MODULE,
401 },
402 .id_table = armada_drm_platform_ids,
403};
404
405static int __init armada_drm_init(void)
406{
407 armada_drm_driver.num_ioctls = DRM_ARRAY_SIZE(armada_ioctls);
408 return platform_driver_register(&armada_drm_platform_driver);
409}
410module_init(armada_drm_init);
411
412static void __exit armada_drm_exit(void)
413{
414 platform_driver_unregister(&armada_drm_platform_driver);
415}
416module_exit(armada_drm_exit);
417
418MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
419MODULE_DESCRIPTION("Armada DRM Driver");
420MODULE_LICENSE("GPL");
421MODULE_ALIAS("platform:armada-drm");
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
new file mode 100644
index 000000000000..1c90969def3e
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -0,0 +1,170 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <drm/drmP.h>
9#include <drm/drm_crtc_helper.h>
10#include <drm/drm_fb_helper.h>
11#include "armada_drm.h"
12#include "armada_fb.h"
13#include "armada_gem.h"
14#include "armada_hw.h"
15
16static void armada_fb_destroy(struct drm_framebuffer *fb)
17{
18 struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
19
20 drm_framebuffer_cleanup(&dfb->fb);
21 drm_gem_object_unreference_unlocked(&dfb->obj->obj);
22 kfree(dfb);
23}
24
25static int armada_fb_create_handle(struct drm_framebuffer *fb,
26 struct drm_file *dfile, unsigned int *handle)
27{
28 struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
29 return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
30}
31
32static const struct drm_framebuffer_funcs armada_fb_funcs = {
33 .destroy = armada_fb_destroy,
34 .create_handle = armada_fb_create_handle,
35};
36
37struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
38 struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
39{
40 struct armada_framebuffer *dfb;
41 uint8_t format, config;
42 int ret;
43
44 switch (mode->pixel_format) {
45#define FMT(drm, fmt, mod) \
46 case DRM_FORMAT_##drm: \
47 format = CFG_##fmt; \
48 config = mod; \
49 break
50 FMT(RGB565, 565, CFG_SWAPRB);
51 FMT(BGR565, 565, 0);
52 FMT(ARGB1555, 1555, CFG_SWAPRB);
53 FMT(ABGR1555, 1555, 0);
54 FMT(RGB888, 888PACK, CFG_SWAPRB);
55 FMT(BGR888, 888PACK, 0);
56 FMT(XRGB8888, X888, CFG_SWAPRB);
57 FMT(XBGR8888, X888, 0);
58 FMT(ARGB8888, 8888, CFG_SWAPRB);
59 FMT(ABGR8888, 8888, 0);
60 FMT(YUYV, 422PACK, CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV);
61 FMT(UYVY, 422PACK, CFG_YUV2RGB);
62 FMT(VYUY, 422PACK, CFG_YUV2RGB | CFG_SWAPUV);
63 FMT(YVYU, 422PACK, CFG_YUV2RGB | CFG_SWAPYU);
64 FMT(YUV422, 422, CFG_YUV2RGB);
65 FMT(YVU422, 422, CFG_YUV2RGB | CFG_SWAPUV);
66 FMT(YUV420, 420, CFG_YUV2RGB);
67 FMT(YVU420, 420, CFG_YUV2RGB | CFG_SWAPUV);
68 FMT(C8, PSEUDO8, 0);
69#undef FMT
70 default:
71 return ERR_PTR(-EINVAL);
72 }
73
74 dfb = kzalloc(sizeof(*dfb), GFP_KERNEL);
75 if (!dfb) {
76 DRM_ERROR("failed to allocate Armada fb object\n");
77 return ERR_PTR(-ENOMEM);
78 }
79
80 dfb->fmt = format;
81 dfb->mod = config;
82 dfb->obj = obj;
83
84 drm_helper_mode_fill_fb_struct(&dfb->fb, mode);
85
86 ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
87 if (ret) {
88 kfree(dfb);
89 return ERR_PTR(ret);
90 }
91
92 /*
93 * Take a reference on our object as we're successful - the
94 * caller already holds a reference, which keeps us safe for
95 * the above call, but the caller will drop their reference
96 * to it. Hence we need to take our own reference.
97 */
98 drm_gem_object_reference(&obj->obj);
99
100 return dfb;
101}
102
103static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
104 struct drm_file *dfile, struct drm_mode_fb_cmd2 *mode)
105{
106 struct armada_gem_object *obj;
107 struct armada_framebuffer *dfb;
108 int ret;
109
110 DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n",
111 mode->width, mode->height, mode->pixel_format,
112 mode->flags, mode->pitches[0], mode->pitches[1],
113 mode->pitches[2]);
114
115 /* We can only handle a single plane at the moment */
116 if (drm_format_num_planes(mode->pixel_format) > 1 &&
117 (mode->handles[0] != mode->handles[1] ||
118 mode->handles[0] != mode->handles[2])) {
119 ret = -EINVAL;
120 goto err;
121 }
122
123 obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]);
124 if (!obj) {
125 ret = -ENOENT;
126 goto err;
127 }
128
129 if (obj->obj.import_attach && !obj->sgt) {
130 ret = armada_gem_map_import(obj);
131 if (ret)
132 goto err_unref;
133 }
134
135 /* Framebuffer objects must have a valid device address for scanout */
136 if (obj->dev_addr == DMA_ERROR_CODE) {
137 ret = -EINVAL;
138 goto err_unref;
139 }
140
141 dfb = armada_framebuffer_create(dev, mode, obj);
142 if (IS_ERR(dfb)) {
143 ret = PTR_ERR(dfb);
144 goto err;
145 }
146
147 drm_gem_object_unreference_unlocked(&obj->obj);
148
149 return &dfb->fb;
150
151 err_unref:
152 drm_gem_object_unreference_unlocked(&obj->obj);
153 err:
154 DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
155 return ERR_PTR(ret);
156}
157
158static void armada_output_poll_changed(struct drm_device *dev)
159{
160 struct armada_private *priv = dev->dev_private;
161 struct drm_fb_helper *fbh = priv->fbdev;
162
163 if (fbh)
164 drm_fb_helper_hotplug_event(fbh);
165}
166
167const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
168 .fb_create = armada_fb_create,
169 .output_poll_changed = armada_output_poll_changed,
170};
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
new file mode 100644
index 000000000000..ce3f12ebfc53
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -0,0 +1,24 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef ARMADA_FB_H
9#define ARMADA_FB_H
10
11struct armada_framebuffer {
12 struct drm_framebuffer fb;
13 struct armada_gem_object *obj;
14 uint8_t fmt;
15 uint8_t mod;
16};
17#define drm_fb_to_armada_fb(dfb) \
18 container_of(dfb, struct armada_framebuffer, fb)
19#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
20
21struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
22 struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
23
24#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
new file mode 100644
index 000000000000..dd5ea77dac96
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -0,0 +1,202 @@
1/*
2 * Copyright (C) 2012 Russell King
3 * Written from the i915 driver.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/errno.h>
10#include <linux/fb.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13
14#include <drm/drmP.h>
15#include <drm/drm_fb_helper.h>
16#include "armada_crtc.h"
17#include "armada_drm.h"
18#include "armada_fb.h"
19#include "armada_gem.h"
20
21static /*const*/ struct fb_ops armada_fb_ops = {
22 .owner = THIS_MODULE,
23 .fb_check_var = drm_fb_helper_check_var,
24 .fb_set_par = drm_fb_helper_set_par,
25 .fb_fillrect = cfb_fillrect,
26 .fb_copyarea = cfb_copyarea,
27 .fb_imageblit = cfb_imageblit,
28 .fb_pan_display = drm_fb_helper_pan_display,
29 .fb_blank = drm_fb_helper_blank,
30 .fb_setcmap = drm_fb_helper_setcmap,
31 .fb_debug_enter = drm_fb_helper_debug_enter,
32 .fb_debug_leave = drm_fb_helper_debug_leave,
33};
34
35static int armada_fb_create(struct drm_fb_helper *fbh,
36 struct drm_fb_helper_surface_size *sizes)
37{
38 struct drm_device *dev = fbh->dev;
39 struct drm_mode_fb_cmd2 mode;
40 struct armada_framebuffer *dfb;
41 struct armada_gem_object *obj;
42 struct fb_info *info;
43 int size, ret;
44 void *ptr;
45
46 memset(&mode, 0, sizeof(mode));
47 mode.width = sizes->surface_width;
48 mode.height = sizes->surface_height;
49 mode.pitches[0] = armada_pitch(mode.width, sizes->surface_bpp);
50 mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
51 sizes->surface_depth);
52
53 size = mode.pitches[0] * mode.height;
54 obj = armada_gem_alloc_private_object(dev, size);
55 if (!obj) {
56 DRM_ERROR("failed to allocate fb memory\n");
57 return -ENOMEM;
58 }
59
60 ret = armada_gem_linear_back(dev, obj);
61 if (ret) {
62 drm_gem_object_unreference_unlocked(&obj->obj);
63 return ret;
64 }
65
66 ptr = armada_gem_map_object(dev, obj);
67 if (!ptr) {
68 drm_gem_object_unreference_unlocked(&obj->obj);
69 return -ENOMEM;
70 }
71
72 dfb = armada_framebuffer_create(dev, &mode, obj);
73
74 /*
75 * A reference is now held by the framebuffer object if
76 * successful, otherwise this drops the ref for the error path.
77 */
78 drm_gem_object_unreference_unlocked(&obj->obj);
79
80 if (IS_ERR(dfb))
81 return PTR_ERR(dfb);
82
83 info = framebuffer_alloc(0, dev->dev);
84 if (!info) {
85 ret = -ENOMEM;
86 goto err_fballoc;
87 }
88
89 ret = fb_alloc_cmap(&info->cmap, 256, 0);
90 if (ret) {
91 ret = -ENOMEM;
92 goto err_fbcmap;
93 }
94
95 strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
96 info->par = fbh;
97 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
98 info->fbops = &armada_fb_ops;
99 info->fix.smem_start = obj->phys_addr;
100 info->fix.smem_len = obj->obj.size;
101 info->screen_size = obj->obj.size;
102 info->screen_base = ptr;
103 fbh->fb = &dfb->fb;
104 fbh->fbdev = info;
105 drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
106 drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
107
108 DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
109 dfb->fb.width, dfb->fb.height,
110 dfb->fb.bits_per_pixel, obj->phys_addr);
111
112 return 0;
113
114 err_fbcmap:
115 framebuffer_release(info);
116 err_fballoc:
117 dfb->fb.funcs->destroy(&dfb->fb);
118 return ret;
119}
120
121static int armada_fb_probe(struct drm_fb_helper *fbh,
122 struct drm_fb_helper_surface_size *sizes)
123{
124 int ret = 0;
125
126 if (!fbh->fb) {
127 ret = armada_fb_create(fbh, sizes);
128 if (ret == 0)
129 ret = 1;
130 }
131 return ret;
132}
133
134static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
135 .gamma_set = armada_drm_crtc_gamma_set,
136 .gamma_get = armada_drm_crtc_gamma_get,
137 .fb_probe = armada_fb_probe,
138};
139
140int armada_fbdev_init(struct drm_device *dev)
141{
142 struct armada_private *priv = dev->dev_private;
143 struct drm_fb_helper *fbh;
144 int ret;
145
146 fbh = devm_kzalloc(dev->dev, sizeof(*fbh), GFP_KERNEL);
147 if (!fbh)
148 return -ENOMEM;
149
150 priv->fbdev = fbh;
151
152 fbh->funcs = &armada_fb_helper_funcs;
153
154 ret = drm_fb_helper_init(dev, fbh, 1, 1);
155 if (ret) {
156 DRM_ERROR("failed to initialize drm fb helper\n");
157 goto err_fb_helper;
158 }
159
160 ret = drm_fb_helper_single_add_all_connectors(fbh);
161 if (ret) {
162 DRM_ERROR("failed to add fb connectors\n");
163 goto err_fb_setup;
164 }
165
166 ret = drm_fb_helper_initial_config(fbh, 32);
167 if (ret) {
168 DRM_ERROR("failed to set initial config\n");
169 goto err_fb_setup;
170 }
171
172 return 0;
173 err_fb_setup:
174 drm_fb_helper_fini(fbh);
175 err_fb_helper:
176 priv->fbdev = NULL;
177 return ret;
178}
179
180void armada_fbdev_fini(struct drm_device *dev)
181{
182 struct armada_private *priv = dev->dev_private;
183 struct drm_fb_helper *fbh = priv->fbdev;
184
185 if (fbh) {
186 struct fb_info *info = fbh->fbdev;
187
188 if (info) {
189 unregister_framebuffer(info);
190 if (info->cmap.len)
191 fb_dealloc_cmap(&info->cmap);
192 framebuffer_release(info);
193 }
194
195 if (fbh->fb)
196 fbh->fb->funcs->destroy(fbh->fb);
197
198 drm_fb_helper_fini(fbh);
199
200 priv->fbdev = NULL;
201 }
202}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
new file mode 100644
index 000000000000..9f2356bae7fd
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -0,0 +1,611 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/dma-buf.h>
9#include <linux/dma-mapping.h>
10#include <linux/shmem_fs.h>
11#include <drm/drmP.h>
12#include "armada_drm.h"
13#include "armada_gem.h"
14#include <drm/armada_drm.h>
15#include "armada_ioctlP.h"
16
17static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
18{
19 struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
20 unsigned long addr = (unsigned long)vmf->virtual_address;
21 unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
22 int ret;
23
24 pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
25 ret = vm_insert_pfn(vma, addr, pfn);
26
27 switch (ret) {
28 case 0:
29 case -EBUSY:
30 return VM_FAULT_NOPAGE;
31 case -ENOMEM:
32 return VM_FAULT_OOM;
33 default:
34 return VM_FAULT_SIGBUS;
35 }
36}
37
38const struct vm_operations_struct armada_gem_vm_ops = {
39 .fault = armada_gem_vm_fault,
40 .open = drm_gem_vm_open,
41 .close = drm_gem_vm_close,
42};
43
44static size_t roundup_gem_size(size_t size)
45{
46 return roundup(size, PAGE_SIZE);
47}
48
49/* dev->struct_mutex is held here */
50void armada_gem_free_object(struct drm_gem_object *obj)
51{
52 struct armada_gem_object *dobj = drm_to_armada_gem(obj);
53
54 DRM_DEBUG_DRIVER("release obj %p\n", dobj);
55
56 drm_gem_free_mmap_offset(&dobj->obj);
57
58 if (dobj->page) {
59 /* page backed memory */
60 unsigned int order = get_order(dobj->obj.size);
61 __free_pages(dobj->page, order);
62 } else if (dobj->linear) {
63 /* linear backed memory */
64 drm_mm_remove_node(dobj->linear);
65 kfree(dobj->linear);
66 if (dobj->addr)
67 iounmap(dobj->addr);
68 }
69
70 if (dobj->obj.import_attach) {
71 /* We only ever display imported data */
72 dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
73 DMA_TO_DEVICE);
74 drm_prime_gem_destroy(&dobj->obj, NULL);
75 }
76
77 drm_gem_object_release(&dobj->obj);
78
79 kfree(dobj);
80}
81
82int
83armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
84{
85 struct armada_private *priv = dev->dev_private;
86 size_t size = obj->obj.size;
87
88 if (obj->page || obj->linear)
89 return 0;
90
91 /*
92 * If it is a small allocation (typically cursor, which will
93 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
94 * Framebuffers will never be this small (our minimum size for
95 * framebuffers is larger than this anyway.) Such objects are
 96	 * only accessed by the CPU so we don't need any special handling
97 * here.
98 */
99 if (size <= 8192) {
100 unsigned int order = get_order(size);
101 struct page *p = alloc_pages(GFP_KERNEL, order);
102
103 if (p) {
104 obj->addr = page_address(p);
105 obj->phys_addr = page_to_phys(p);
106 obj->page = p;
107
108 memset(obj->addr, 0, PAGE_ALIGN(size));
109 }
110 }
111
112 /*
113 * We could grab something from CMA if it's enabled, but that
 114	 * would build in a problem:
115 *
116 * CMA's interface uses dma_alloc_coherent(), which provides us
 117	 * with a CPU virtual address and a device address.
118 *
119 * The CPU virtual address may be either an address in the kernel
120 * direct mapped region (for example, as it would be on x86) or
121 * it may be remapped into another part of kernel memory space
122 * (eg, as it would be on ARM.) This means virt_to_phys() on the
 123	 * returned virtual address may be invalid, depending on the architecture
124 * implementation.
125 *
126 * The device address may also not be a physical address; it may
127 * be that there is some kind of remapping between the device and
128 * system RAM, which makes the use of the device address also
129 * unsafe to re-use as a physical address.
130 *
131 * This makes DRM usage of dma_alloc_coherent() in a generic way
132 * at best very questionable and unsafe.
133 */
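	/*
	 * For comparison, a CMA-backed allocation would look roughly like
	 * this (hypothetical, not used here for the reasons above):
	 *
	 *	dma_addr_t dma;
	 *	void *cpu = dma_alloc_coherent(dev->dev, size, &dma, GFP_KERNEL);
	 *
	 * where only "dma" is meaningful to the device; deriving a physical
	 * address from "cpu" via virt_to_phys() would be wrong on ARM,
	 * exactly as described above.
	 */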
134
135 /* Otherwise, grab it from our linear allocation */
136 if (!obj->page) {
137 struct drm_mm_node *node;
138 unsigned align = min_t(unsigned, size, SZ_2M);
139 void __iomem *ptr;
140 int ret;
141
142 node = kzalloc(sizeof(*node), GFP_KERNEL);
143 if (!node)
 144			return -ENOMEM;
145
146 mutex_lock(&dev->struct_mutex);
147 ret = drm_mm_insert_node(&priv->linear, node, size, align,
148 DRM_MM_SEARCH_DEFAULT);
149 mutex_unlock(&dev->struct_mutex);
150 if (ret) {
151 kfree(node);
152 return ret;
153 }
154
155 obj->linear = node;
156
157 /* Ensure that the memory we're returning is cleared. */
158 ptr = ioremap_wc(obj->linear->start, size);
159 if (!ptr) {
160 mutex_lock(&dev->struct_mutex);
161 drm_mm_remove_node(obj->linear);
162 mutex_unlock(&dev->struct_mutex);
163 kfree(obj->linear);
164 obj->linear = NULL;
165 return -ENOMEM;
166 }
167
168 memset_io(ptr, 0, size);
169 iounmap(ptr);
170
171 obj->phys_addr = obj->linear->start;
172 obj->dev_addr = obj->linear->start;
173 }
174
 175	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
 176		(unsigned long long)obj->phys_addr, (unsigned long long)obj->dev_addr);
177
178 return 0;
179}
180
181void *
182armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
183{
184 /* only linear objects need to be ioremap'd */
185 if (!dobj->addr && dobj->linear)
186 dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
187 return dobj->addr;
188}
189
190struct armada_gem_object *
191armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
192{
193 struct armada_gem_object *obj;
194
195 size = roundup_gem_size(size);
196
197 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
198 if (!obj)
199 return NULL;
200
201 drm_gem_private_object_init(dev, &obj->obj, size);
202 obj->dev_addr = DMA_ERROR_CODE;
203
204 DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
205
206 return obj;
207}
208
209struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
210 size_t size)
211{
212 struct armada_gem_object *obj;
213 struct address_space *mapping;
214
215 size = roundup_gem_size(size);
216
217 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
218 if (!obj)
219 return NULL;
220
221 if (drm_gem_object_init(dev, &obj->obj, size)) {
222 kfree(obj);
223 return NULL;
224 }
225
226 obj->dev_addr = DMA_ERROR_CODE;
227
 228	mapping = file_inode(obj->obj.filp)->i_mapping;
229 mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
230
231 DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
232
233 return obj;
234}
235
236/* Dumb alloc support */
237int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
238 struct drm_mode_create_dumb *args)
239{
240 struct armada_gem_object *dobj;
241 u32 handle;
242 size_t size;
243 int ret;
244
245 args->pitch = armada_pitch(args->width, args->bpp);
246 args->size = size = args->pitch * args->height;
247
248 dobj = armada_gem_alloc_private_object(dev, size);
249 if (dobj == NULL)
250 return -ENOMEM;
251
252 ret = armada_gem_linear_back(dev, dobj);
253 if (ret)
254 goto err;
255
256 ret = drm_gem_handle_create(file, &dobj->obj, &handle);
257 if (ret)
258 goto err;
259
260 args->handle = handle;
261
262 /* drop reference from allocate - handle holds it now */
263 DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
264 err:
265 drm_gem_object_unreference_unlocked(&dobj->obj);
266 return ret;
267}
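/*
 * A minimal userspace sketch of the dumb-buffer path above, using the
 * generic DRM ioctls (error handling omitted):
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *
 *	struct drm_mode_map_dumb mreq = { .handle = creq.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *	void *map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mreq.offset);
 */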
268
269int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
270 uint32_t handle, uint64_t *offset)
271{
272 struct armada_gem_object *obj;
273 int ret = 0;
274
275 mutex_lock(&dev->struct_mutex);
276 obj = armada_gem_object_lookup(dev, file, handle);
277 if (!obj) {
278 DRM_ERROR("failed to lookup gem object\n");
279 ret = -EINVAL;
280 goto err_unlock;
281 }
282
283 /* Don't allow imported objects to be mapped */
284 if (obj->obj.import_attach) {
285 ret = -EINVAL;
286 goto err_unlock;
287 }
288
289 ret = drm_gem_create_mmap_offset(&obj->obj);
290 if (ret == 0) {
291 *offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
292 DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
293 }
294
295 drm_gem_object_unreference(&obj->obj);
296 err_unlock:
297 mutex_unlock(&dev->struct_mutex);
298
299 return ret;
300}
301
302int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
303 uint32_t handle)
304{
305 return drm_gem_handle_delete(file, handle);
306}
307
308/* Private driver gem ioctls */
309int armada_gem_create_ioctl(struct drm_device *dev, void *data,
310 struct drm_file *file)
311{
312 struct drm_armada_gem_create *args = data;
313 struct armada_gem_object *dobj;
314 size_t size;
315 u32 handle;
316 int ret;
317
318 if (args->size == 0)
319 return -ENOMEM;
320
321 size = args->size;
322
323 dobj = armada_gem_alloc_object(dev, size);
324 if (dobj == NULL)
325 return -ENOMEM;
326
327 ret = drm_gem_handle_create(file, &dobj->obj, &handle);
328 if (ret)
329 goto err;
330
331 args->handle = handle;
332
333 /* drop reference from allocate - handle holds it now */
334 DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
335 err:
336 drm_gem_object_unreference_unlocked(&dobj->obj);
337 return ret;
338}
339
340/* Map a shmem-backed object into process memory space */
341int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
342 struct drm_file *file)
343{
344 struct drm_armada_gem_mmap *args = data;
345 struct armada_gem_object *dobj;
346 unsigned long addr;
347
348 dobj = armada_gem_object_lookup(dev, file, args->handle);
349 if (dobj == NULL)
350 return -ENOENT;
351
352 if (!dobj->obj.filp) {
353 drm_gem_object_unreference(&dobj->obj);
354 return -EINVAL;
355 }
356
357 addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
358 MAP_SHARED, args->offset);
359 drm_gem_object_unreference(&dobj->obj);
360 if (IS_ERR_VALUE(addr))
361 return addr;
362
363 args->addr = addr;
364
365 return 0;
366}
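/*
 * Together with the create ioctl above, userspace might drive these
 * handlers like so (a sketch, assuming the ioctl names from the
 * <drm/armada_drm.h> uapi; fields as consumed above, error handling
 * omitted):
 *
 *	struct drm_armada_gem_create c = { .size = len };
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_CREATE, &c);
 *
 *	struct drm_armada_gem_mmap m = {
 *		.handle = c.handle, .offset = 0, .size = len,
 *	};
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_MMAP, &m);
 *	void *p = (void *)(uintptr_t)m.addr;
 */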
367
368int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
369 struct drm_file *file)
370{
371 struct drm_armada_gem_pwrite *args = data;
372 struct armada_gem_object *dobj;
373 char __user *ptr;
374 int ret;
375
376 DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
377 args->handle, args->offset, args->size, args->ptr);
378
379 if (args->size == 0)
380 return 0;
381
382 ptr = (char __user *)(uintptr_t)args->ptr;
383
384 if (!access_ok(VERIFY_READ, ptr, args->size))
385 return -EFAULT;
386
387 ret = fault_in_multipages_readable(ptr, args->size);
388 if (ret)
389 return ret;
390
391 dobj = armada_gem_object_lookup(dev, file, args->handle);
392 if (dobj == NULL)
393 return -ENOENT;
394
 395	/* Must be a kernel-mapped object; drop the lookup reference on error */
 396	if (!dobj->addr) {
 397		ret = -EINVAL;
 398		goto unref;
 399	}
398
399 if (args->offset > dobj->obj.size ||
400 args->size > dobj->obj.size - args->offset) {
 401		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
402 ret = -EINVAL;
403 goto unref;
404 }
405
406 if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
407 ret = -EFAULT;
408 } else if (dobj->update) {
409 dobj->update(dobj->update_data);
410 ret = 0;
411 }
412
413 unref:
414 drm_gem_object_unreference_unlocked(&dobj->obj);
415 return ret;
416}
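/*
 * The corresponding userspace call for the pwrite path is a single
 * ioctl (a sketch, assuming the <drm/armada_drm.h> uapi definitions):
 *
 *	struct drm_armada_gem_pwrite w = {
 *		.ptr = (uint64_t)(uintptr_t)buf,
 *		.handle = handle,
 *		.offset = 0,
 *		.size = len,
 *	};
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_PWRITE, &w);
 */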
417
418/* Prime support */
419struct sg_table *
420armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
421 enum dma_data_direction dir)
422{
423 struct drm_gem_object *obj = attach->dmabuf->priv;
424 struct armada_gem_object *dobj = drm_to_armada_gem(obj);
425 struct scatterlist *sg;
426 struct sg_table *sgt;
427 int i, num;
428
429 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
430 if (!sgt)
431 return NULL;
432
433 if (dobj->obj.filp) {
434 struct address_space *mapping;
435 gfp_t gfp;
436 int count;
437
438 count = dobj->obj.size / PAGE_SIZE;
439 if (sg_alloc_table(sgt, count, GFP_KERNEL))
440 goto free_sgt;
441
442 mapping = file_inode(dobj->obj.filp)->i_mapping;
443 gfp = mapping_gfp_mask(mapping);
444
445 for_each_sg(sgt->sgl, sg, count, i) {
446 struct page *page;
447
448 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
449 if (IS_ERR(page)) {
450 num = i;
451 goto release;
452 }
453
454 sg_set_page(sg, page, PAGE_SIZE, 0);
455 }
456
457 if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
458 num = sgt->nents;
459 goto release;
460 }
461 } else if (dobj->page) {
462 /* Single contiguous page */
463 if (sg_alloc_table(sgt, 1, GFP_KERNEL))
464 goto free_sgt;
465
466 sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
467
468 if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
469 goto free_table;
470 } else if (dobj->linear) {
471 /* Single contiguous physical region - no struct page */
472 if (sg_alloc_table(sgt, 1, GFP_KERNEL))
473 goto free_sgt;
474 sg_dma_address(sgt->sgl) = dobj->dev_addr;
475 sg_dma_len(sgt->sgl) = dobj->obj.size;
476 } else {
477 goto free_sgt;
478 }
479 return sgt;
480
481 release:
482 for_each_sg(sgt->sgl, sg, num, i)
483 page_cache_release(sg_page(sg));
484 free_table:
485 sg_free_table(sgt);
486 free_sgt:
487 kfree(sgt);
488 return NULL;
489}
490
491static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
492 struct sg_table *sgt, enum dma_data_direction dir)
493{
494 struct drm_gem_object *obj = attach->dmabuf->priv;
495 struct armada_gem_object *dobj = drm_to_armada_gem(obj);
496 int i;
497
498 if (!dobj->linear)
499 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
500
501 if (dobj->obj.filp) {
502 struct scatterlist *sg;
503 for_each_sg(sgt->sgl, sg, sgt->nents, i)
504 page_cache_release(sg_page(sg));
505 }
506
507 sg_free_table(sgt);
508 kfree(sgt);
509}
510
511static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
512{
513 return NULL;
514}
515
516static void
517armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
518{
519}
520
521static int
522armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
523{
524 return -EINVAL;
525}
526
527static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
528 .map_dma_buf = armada_gem_prime_map_dma_buf,
529 .unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
530 .release = drm_gem_dmabuf_release,
531 .kmap_atomic = armada_gem_dmabuf_no_kmap,
532 .kunmap_atomic = armada_gem_dmabuf_no_kunmap,
533 .kmap = armada_gem_dmabuf_no_kmap,
534 .kunmap = armada_gem_dmabuf_no_kunmap,
535 .mmap = armada_gem_dmabuf_mmap,
536};
537
538struct dma_buf *
539armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
540 int flags)
541{
542 return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
543 O_RDWR);
544}
545
546struct drm_gem_object *
547armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
548{
549 struct dma_buf_attachment *attach;
550 struct armada_gem_object *dobj;
551
552 if (buf->ops == &armada_gem_prime_dmabuf_ops) {
553 struct drm_gem_object *obj = buf->priv;
554 if (obj->dev == dev) {
555 /*
556 * Importing our own dmabuf(s) increases the
557 * refcount on the gem object itself.
558 */
559 drm_gem_object_reference(obj);
560 dma_buf_put(buf);
561 return obj;
562 }
563 }
564
565 attach = dma_buf_attach(buf, dev->dev);
566 if (IS_ERR(attach))
567 return ERR_CAST(attach);
568
569 dobj = armada_gem_alloc_private_object(dev, buf->size);
570 if (!dobj) {
571 dma_buf_detach(buf, attach);
572 return ERR_PTR(-ENOMEM);
573 }
574
575 dobj->obj.import_attach = attach;
576
577 /*
578 * Don't call dma_buf_map_attachment() here - it maps the
579 * scatterlist immediately for DMA, and this is not always
580 * an appropriate thing to do.
581 */
582 return &dobj->obj;
583}
584
585int armada_gem_map_import(struct armada_gem_object *dobj)
586{
587 int ret;
588
589 dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
590 DMA_TO_DEVICE);
591 if (!dobj->sgt) {
592 DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
593 return -EINVAL;
594 }
595 if (IS_ERR(dobj->sgt)) {
596 ret = PTR_ERR(dobj->sgt);
597 dobj->sgt = NULL;
598 DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
599 return ret;
600 }
601 if (dobj->sgt->nents > 1) {
602 DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
603 return -EINVAL;
604 }
605 if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
 606		DRM_ERROR("dma_buf_map_attachment() returned a buffer smaller than the object\n");
607 return -EINVAL;
608 }
609 dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
610 return 0;
611}
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
new file mode 100644
index 000000000000..00b6cd461a03
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef ARMADA_GEM_H
9#define ARMADA_GEM_H
10
11/* GEM */
12struct armada_gem_object {
13 struct drm_gem_object obj;
14 void *addr;
15 phys_addr_t phys_addr;
16 resource_size_t dev_addr;
17 struct drm_mm_node *linear; /* for linear backed */
18 struct page *page; /* for page backed */
19 struct sg_table *sgt; /* for imported */
20 void (*update)(void *);
21 void *update_data;
22};
23
24extern const struct vm_operations_struct armada_gem_vm_ops;
25
26#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)
27
28void armada_gem_free_object(struct drm_gem_object *);
29int armada_gem_linear_back(struct drm_device *, struct armada_gem_object *);
30void *armada_gem_map_object(struct drm_device *, struct armada_gem_object *);
31struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
32 size_t);
33int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
34 struct drm_mode_create_dumb *);
35int armada_gem_dumb_map_offset(struct drm_file *, struct drm_device *,
36 uint32_t, uint64_t *);
37int armada_gem_dumb_destroy(struct drm_file *, struct drm_device *,
38 uint32_t);
39struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
40 struct drm_gem_object *obj, int flags);
41struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
42 struct dma_buf *);
43int armada_gem_map_import(struct armada_gem_object *);
44
45static inline struct armada_gem_object *armada_gem_object_lookup(
46 struct drm_device *dev, struct drm_file *dfile, unsigned handle)
47{
48 struct drm_gem_object *obj = drm_gem_object_lookup(dev, dfile, handle);
49
50 return obj ? drm_to_armada_gem(obj) : NULL;
51}
52#endif
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
new file mode 100644
index 000000000000..27319a8335e2
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -0,0 +1,318 @@
1/*
2 * Copyright (C) 2012 Russell King
3 * Rewritten from the dovefb driver, and Armada510 manuals.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#ifndef ARMADA_HW_H
10#define ARMADA_HW_H
11
12/*
13 * Note: the following registers are written from IRQ context:
14 * LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
15 * LCD_SPU_DMA_START_ADDR_[YUV][01], LCD_SPU_DMA_PITCH_YC,
16 * LCD_SPU_DMA_PITCH_UV, LCD_SPU_DMA_OVSA_HPXL_VLN,
17 * LCD_SPU_DMA_HPXL_VLN, LCD_SPU_DZM_HPXL_VLN, LCD_SPU_DMA_CTRL0
18 */
19enum {
20 LCD_SPU_ADV_REG = 0x0084, /* Armada 510 */
21 LCD_SPU_DMA_START_ADDR_Y0 = 0x00c0,
22 LCD_SPU_DMA_START_ADDR_U0 = 0x00c4,
23 LCD_SPU_DMA_START_ADDR_V0 = 0x00c8,
24 LCD_CFG_DMA_START_ADDR_0 = 0x00cc,
25 LCD_SPU_DMA_START_ADDR_Y1 = 0x00d0,
26 LCD_SPU_DMA_START_ADDR_U1 = 0x00d4,
27 LCD_SPU_DMA_START_ADDR_V1 = 0x00d8,
28 LCD_CFG_DMA_START_ADDR_1 = 0x00dc,
29 LCD_SPU_DMA_PITCH_YC = 0x00e0,
30 LCD_SPU_DMA_PITCH_UV = 0x00e4,
31 LCD_SPU_DMA_OVSA_HPXL_VLN = 0x00e8,
32 LCD_SPU_DMA_HPXL_VLN = 0x00ec,
33 LCD_SPU_DZM_HPXL_VLN = 0x00f0,
34 LCD_CFG_GRA_START_ADDR0 = 0x00f4,
35 LCD_CFG_GRA_START_ADDR1 = 0x00f8,
36 LCD_CFG_GRA_PITCH = 0x00fc,
37 LCD_SPU_GRA_OVSA_HPXL_VLN = 0x0100,
38 LCD_SPU_GRA_HPXL_VLN = 0x0104,
39 LCD_SPU_GZM_HPXL_VLN = 0x0108,
40 LCD_SPU_HWC_OVSA_HPXL_VLN = 0x010c,
41 LCD_SPU_HWC_HPXL_VLN = 0x0110,
42 LCD_SPUT_V_H_TOTAL = 0x0114,
43 LCD_SPU_V_H_ACTIVE = 0x0118,
44 LCD_SPU_H_PORCH = 0x011c,
45 LCD_SPU_V_PORCH = 0x0120,
46 LCD_SPU_BLANKCOLOR = 0x0124,
47 LCD_SPU_ALPHA_COLOR1 = 0x0128,
48 LCD_SPU_ALPHA_COLOR2 = 0x012c,
49 LCD_SPU_COLORKEY_Y = 0x0130,
50 LCD_SPU_COLORKEY_U = 0x0134,
51 LCD_SPU_COLORKEY_V = 0x0138,
52 LCD_CFG_RDREG4F = 0x013c, /* Armada 510 */
53 LCD_SPU_SPI_RXDATA = 0x0140,
54 LCD_SPU_ISA_RXDATA = 0x0144,
55 LCD_SPU_HWC_RDDAT = 0x0158,
56 LCD_SPU_GAMMA_RDDAT = 0x015c,
57 LCD_SPU_PALETTE_RDDAT = 0x0160,
58 LCD_SPU_IOPAD_IN = 0x0178,
59 LCD_CFG_RDREG5F = 0x017c,
60 LCD_SPU_SPI_CTRL = 0x0180,
61 LCD_SPU_SPI_TXDATA = 0x0184,
62 LCD_SPU_SMPN_CTRL = 0x0188,
63 LCD_SPU_DMA_CTRL0 = 0x0190,
64 LCD_SPU_DMA_CTRL1 = 0x0194,
65 LCD_SPU_SRAM_CTRL = 0x0198,
66 LCD_SPU_SRAM_WRDAT = 0x019c,
67 LCD_SPU_SRAM_PARA0 = 0x01a0, /* Armada 510 */
68 LCD_SPU_SRAM_PARA1 = 0x01a4,
69 LCD_CFG_SCLK_DIV = 0x01a8,
70 LCD_SPU_CONTRAST = 0x01ac,
71 LCD_SPU_SATURATION = 0x01b0,
72 LCD_SPU_CBSH_HUE = 0x01b4,
73 LCD_SPU_DUMB_CTRL = 0x01b8,
74 LCD_SPU_IOPAD_CONTROL = 0x01bc,
75 LCD_SPU_IRQ_ENA = 0x01c0,
76 LCD_SPU_IRQ_ISR = 0x01c4,
77};
78
79/* For LCD_SPU_ADV_REG */
80enum {
81 ADV_VSYNC_L_OFF = 0xfff << 20,
82 ADV_GRACOLORKEY = 1 << 19,
83 ADV_VIDCOLORKEY = 1 << 18,
84 ADV_HWC32BLEND = 1 << 15,
85 ADV_HWC32ARGB = 1 << 14,
86 ADV_HWC32ENABLE = 1 << 13,
87 ADV_VSYNCOFFEN = 1 << 12,
88 ADV_VSYNC_H_OFF = 0xfff << 0,
89};
90
91enum {
92 CFG_565 = 0,
93 CFG_1555 = 1,
94 CFG_888PACK = 2,
95 CFG_X888 = 3,
96 CFG_8888 = 4,
97 CFG_422PACK = 5,
98 CFG_422 = 6,
99 CFG_420 = 7,
100 CFG_PSEUDO4 = 9,
101 CFG_PSEUDO8 = 10,
102 CFG_SWAPRB = 1 << 4,
103 CFG_SWAPUV = 1 << 3,
104 CFG_SWAPYU = 1 << 2,
105 CFG_YUV2RGB = 1 << 1,
106};
107
108/* For LCD_SPU_DMA_CTRL0 */
109enum {
110 CFG_NOBLENDING = 1 << 31,
111 CFG_GAMMA_ENA = 1 << 30,
112 CFG_CBSH_ENA = 1 << 29,
113 CFG_PALETTE_ENA = 1 << 28,
114 CFG_ARBFAST_ENA = 1 << 27,
115 CFG_HWC_1BITMOD = 1 << 26,
116 CFG_HWC_1BITENA = 1 << 25,
117 CFG_HWC_ENA = 1 << 24,
118 CFG_DMAFORMAT = 0xf << 20,
119#define CFG_DMA_FMT(x) ((x) << 20)
120 CFG_GRAFORMAT = 0xf << 16,
121#define CFG_GRA_FMT(x) ((x) << 16)
122#define CFG_GRA_MOD(x) ((x) << 8)
123 CFG_GRA_FTOGGLE = 1 << 15,
124 CFG_GRA_HSMOOTH = 1 << 14,
125 CFG_GRA_TSTMODE = 1 << 13,
126 CFG_GRA_ENA = 1 << 8,
127#define CFG_DMA_MOD(x) ((x) << 0)
128 CFG_DMA_FTOGGLE = 1 << 7,
129 CFG_DMA_HSMOOTH = 1 << 6,
130 CFG_DMA_TSTMODE = 1 << 5,
131 CFG_DMA_ENA = 1 << 0,
132};
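/*
 * These fields compose into a single control word.  For example, the
 * overlay code enables a YUV420 video plane with roughly (illustrative
 * values):
 *
 *	ctrl0 = CFG_DMA_FMT(CFG_420) | CFG_DMA_MOD(CFG_YUV2RGB) |
 *		CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
 */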
133
134enum {
135 CKMODE_DISABLE = 0,
136 CKMODE_Y = 1,
137 CKMODE_U = 2,
138 CKMODE_RGB = 3,
139 CKMODE_V = 4,
140 CKMODE_R = 5,
141 CKMODE_G = 6,
142 CKMODE_B = 7,
143};
144
145/* For LCD_SPU_DMA_CTRL1 */
146enum {
147 CFG_FRAME_TRIG = 1 << 31,
148 CFG_VSYNC_INV = 1 << 27,
149 CFG_CKMODE_MASK = 0x7 << 24,
150#define CFG_CKMODE(x) ((x) << 24)
151 CFG_CARRY = 1 << 23,
152 CFG_GATED_CLK = 1 << 21,
153 CFG_PWRDN_ENA = 1 << 20,
154 CFG_DSCALE_MASK = 0x3 << 18,
155 CFG_DSCALE_NONE = 0x0 << 18,
156 CFG_DSCALE_HALF = 0x1 << 18,
157 CFG_DSCALE_QUAR = 0x2 << 18,
158 CFG_ALPHAM_MASK = 0x3 << 16,
159 CFG_ALPHAM_VIDEO = 0x0 << 16,
160 CFG_ALPHAM_GRA = 0x1 << 16,
161 CFG_ALPHAM_CFG = 0x2 << 16,
162 CFG_ALPHA_MASK = 0xff << 8,
163 CFG_PIXCMD_MASK = 0xff,
164};
165
166/* For LCD_SPU_SRAM_CTRL */
167enum {
168 SRAM_READ = 0 << 14,
169 SRAM_WRITE = 2 << 14,
170 SRAM_INIT = 3 << 14,
171 SRAM_HWC32_RAM1 = 0xc << 8,
172 SRAM_HWC32_RAM2 = 0xd << 8,
173 SRAM_HWC32_RAMR = SRAM_HWC32_RAM1,
174 SRAM_HWC32_RAMG = SRAM_HWC32_RAM2,
175 SRAM_HWC32_RAMB = 0xe << 8,
176 SRAM_HWC32_TRAN = 0xf << 8,
177 SRAM_HWC = 0xf << 8,
178};
179
180/* For LCD_SPU_SRAM_PARA1 */
181enum {
182 CFG_CSB_256x32 = 1 << 15, /* cursor */
183 CFG_CSB_256x24 = 1 << 14, /* palette */
184 CFG_CSB_256x8 = 1 << 13, /* gamma */
185 CFG_PDWN1920x32 = 1 << 8, /* Armada 510: power down vscale ram */
186 CFG_PDWN256x32 = 1 << 7, /* power down cursor */
187 CFG_PDWN256x24 = 1 << 6, /* power down palette */
188 CFG_PDWN256x8 = 1 << 5, /* power down gamma */
189 CFG_PDWNHWC = 1 << 4, /* Armada 510: power down all hwc ram */
190 CFG_PDWN32x32 = 1 << 3, /* power down slave->smart ram */
191 CFG_PDWN16x66 = 1 << 2, /* power down UV fifo */
192 CFG_PDWN32x66 = 1 << 1, /* power down Y fifo */
193 CFG_PDWN64x66 = 1 << 0, /* power down graphic fifo */
194};
195
196/* For LCD_CFG_SCLK_DIV */
197enum {
198 /* Armada 510 */
199 SCLK_510_AXI = 0x0 << 30,
200 SCLK_510_EXTCLK0 = 0x1 << 30,
201 SCLK_510_PLL = 0x2 << 30,
202 SCLK_510_EXTCLK1 = 0x3 << 30,
203 SCLK_510_DIV_CHANGE = 1 << 29,
204 SCLK_510_FRAC_DIV_MASK = 0xfff << 16,
205 SCLK_510_INT_DIV_MASK = 0xffff << 0,
206
207 /* Armada 16x */
208 SCLK_16X_AHB = 0x0 << 28,
209 SCLK_16X_PCLK = 0x1 << 28,
210 SCLK_16X_AXI = 0x4 << 28,
211 SCLK_16X_PLL = 0x8 << 28,
212 SCLK_16X_FRAC_DIV_MASK = 0xfff << 16,
213 SCLK_16X_INT_DIV_MASK = 0xffff << 0,
214};
215
216/* For LCD_SPU_DUMB_CTRL */
217enum {
218 DUMB16_RGB565_0 = 0x0 << 28,
219 DUMB16_RGB565_1 = 0x1 << 28,
220 DUMB18_RGB666_0 = 0x2 << 28,
221 DUMB18_RGB666_1 = 0x3 << 28,
222 DUMB12_RGB444_0 = 0x4 << 28,
223 DUMB12_RGB444_1 = 0x5 << 28,
224 DUMB24_RGB888_0 = 0x6 << 28,
225 DUMB_BLANK = 0x7 << 28,
226 DUMB_MASK = 0xf << 28,
227 CFG_BIAS_OUT = 1 << 8,
228 CFG_REV_RGB = 1 << 7,
229 CFG_INV_CBLANK = 1 << 6,
230 CFG_INV_CSYNC = 1 << 5, /* Normally active high */
231 CFG_INV_HENA = 1 << 4,
232 CFG_INV_VSYNC = 1 << 3, /* Normally active high */
233 CFG_INV_HSYNC = 1 << 2, /* Normally active high */
234 CFG_INV_PCLK = 1 << 1,
235 CFG_DUMB_ENA = 1 << 0,
236};
237
238/* For LCD_SPU_IOPAD_CONTROL */
239enum {
240 CFG_VSCALE_LN_EN = 3 << 18,
241 CFG_GRA_VM_ENA = 1 << 15,
242 CFG_DMA_VM_ENA = 1 << 13,
243 CFG_CMD_VM_ENA = 1 << 11,
244 CFG_CSC_MASK = 3 << 8,
245 CFG_CSC_YUV_CCIR709 = 1 << 9,
246 CFG_CSC_YUV_CCIR601 = 0 << 9,
247 CFG_CSC_RGB_STUDIO = 1 << 8,
248 CFG_CSC_RGB_COMPUTER = 0 << 8,
249 CFG_IOPAD_MASK = 0xf << 0,
250 CFG_IOPAD_DUMB24 = 0x0 << 0,
251 CFG_IOPAD_DUMB18SPI = 0x1 << 0,
252 CFG_IOPAD_DUMB18GPIO = 0x2 << 0,
253 CFG_IOPAD_DUMB16SPI = 0x3 << 0,
254 CFG_IOPAD_DUMB16GPIO = 0x4 << 0,
255 CFG_IOPAD_DUMB12GPIO = 0x5 << 0,
256 CFG_IOPAD_SMART18 = 0x6 << 0,
257 CFG_IOPAD_SMART16 = 0x7 << 0,
258 CFG_IOPAD_SMART8 = 0x8 << 0,
259};
260
261#define IOPAD_DUMB24 0x0
262
263/* For LCD_SPU_IRQ_ENA */
264enum {
265 DMA_FRAME_IRQ0_ENA = 1 << 31,
266 DMA_FRAME_IRQ1_ENA = 1 << 30,
267 DMA_FRAME_IRQ_ENA = DMA_FRAME_IRQ0_ENA | DMA_FRAME_IRQ1_ENA,
268 DMA_FF_UNDERFLOW_ENA = 1 << 29,
269 GRA_FRAME_IRQ0_ENA = 1 << 27,
270 GRA_FRAME_IRQ1_ENA = 1 << 26,
271 GRA_FRAME_IRQ_ENA = GRA_FRAME_IRQ0_ENA | GRA_FRAME_IRQ1_ENA,
272 GRA_FF_UNDERFLOW_ENA = 1 << 25,
273 VSYNC_IRQ_ENA = 1 << 23,
274 DUMB_FRAMEDONE_ENA = 1 << 22,
275 TWC_FRAMEDONE_ENA = 1 << 21,
276 HWC_FRAMEDONE_ENA = 1 << 20,
277 SLV_IRQ_ENA = 1 << 19,
278 SPI_IRQ_ENA = 1 << 18,
279 PWRDN_IRQ_ENA = 1 << 17,
280 ERR_IRQ_ENA = 1 << 16,
281 CLEAN_SPU_IRQ_ISR = 0xffff,
282};
283
284/* For LCD_SPU_IRQ_ISR */
285enum {
286 DMA_FRAME_IRQ0 = 1 << 31,
287 DMA_FRAME_IRQ1 = 1 << 30,
288 DMA_FRAME_IRQ = DMA_FRAME_IRQ0 | DMA_FRAME_IRQ1,
289 DMA_FF_UNDERFLOW = 1 << 29,
290 GRA_FRAME_IRQ0 = 1 << 27,
291 GRA_FRAME_IRQ1 = 1 << 26,
292 GRA_FRAME_IRQ = GRA_FRAME_IRQ0 | GRA_FRAME_IRQ1,
293 GRA_FF_UNDERFLOW = 1 << 25,
294 VSYNC_IRQ = 1 << 23,
295 DUMB_FRAMEDONE = 1 << 22,
296 TWC_FRAMEDONE = 1 << 21,
297 HWC_FRAMEDONE = 1 << 20,
298 SLV_IRQ = 1 << 19,
299 SPI_IRQ = 1 << 18,
300 PWRDN_IRQ = 1 << 17,
301 ERR_IRQ = 1 << 16,
302 DMA_FRAME_IRQ0_LEVEL = 1 << 15,
303 DMA_FRAME_IRQ1_LEVEL = 1 << 14,
304 DMA_FRAME_CNT_ISR = 3 << 12,
305 GRA_FRAME_IRQ0_LEVEL = 1 << 11,
306 GRA_FRAME_IRQ1_LEVEL = 1 << 10,
307 GRA_FRAME_CNT_ISR = 3 << 8,
308 VSYNC_IRQ_LEVEL = 1 << 7,
309 DUMB_FRAMEDONE_LEVEL = 1 << 6,
310 TWC_FRAMEDONE_LEVEL = 1 << 5,
311 HWC_FRAMEDONE_LEVEL = 1 << 4,
312 SLV_FF_EMPTY = 1 << 3,
313 DMA_FF_ALLEMPTY = 1 << 2,
314 GRA_FF_ALLEMPTY = 1 << 1,
315 PWRDN_IRQ_LEVEL = 1 << 0,
316};
317
318#endif
diff --git a/drivers/gpu/drm/armada/armada_ioctlP.h b/drivers/gpu/drm/armada/armada_ioctlP.h
new file mode 100644
index 000000000000..bd8c4562066c
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_ioctlP.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef ARMADA_IOCTLP_H
9#define ARMADA_IOCTLP_H
10
11#define ARMADA_IOCTL_PROTO(name)\
12extern int armada_##name##_ioctl(struct drm_device *, void *, struct drm_file *)
13
14ARMADA_IOCTL_PROTO(gem_create);
15ARMADA_IOCTL_PROTO(gem_mmap);
16ARMADA_IOCTL_PROTO(gem_pwrite);
17
18#endif
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
new file mode 100644
index 000000000000..d685a5421485
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_output.c
@@ -0,0 +1,158 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <drm/drmP.h>
9#include <drm/drm_crtc_helper.h>
10#include <drm/drm_edid.h>
11#include <drm/drm_encoder_slave.h>
12#include "armada_output.h"
13#include "armada_drm.h"
14
15struct armada_connector {
16 struct drm_connector conn;
17 const struct armada_output_type *type;
18};
19
20#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn)
21
22struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn)
23{
24 struct drm_encoder *enc = conn->encoder;
25
26 return enc ? enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]);
27}
28
29static enum drm_connector_status armada_drm_connector_detect(
30 struct drm_connector *conn, bool force)
31{
32 struct armada_connector *dconn = drm_to_armada_conn(conn);
33 enum drm_connector_status status = connector_status_disconnected;
34
35 if (dconn->type->detect) {
36 status = dconn->type->detect(conn, force);
37 } else {
38 struct drm_encoder *enc = armada_drm_connector_encoder(conn);
39
40 if (enc)
41 status = encoder_helper_funcs(enc)->detect(enc, conn);
42 }
43
44 return status;
45}
46
47static void armada_drm_connector_destroy(struct drm_connector *conn)
48{
49 struct armada_connector *dconn = drm_to_armada_conn(conn);
50
51 drm_sysfs_connector_remove(conn);
52 drm_connector_cleanup(conn);
53 kfree(dconn);
54}
55
56static int armada_drm_connector_set_property(struct drm_connector *conn,
57 struct drm_property *property, uint64_t value)
58{
59 struct armada_connector *dconn = drm_to_armada_conn(conn);
60
61 if (!dconn->type->set_property)
62 return -EINVAL;
63
64 return dconn->type->set_property(conn, property, value);
65}
66
67static const struct drm_connector_funcs armada_drm_conn_funcs = {
68 .dpms = drm_helper_connector_dpms,
69 .fill_modes = drm_helper_probe_single_connector_modes,
70 .detect = armada_drm_connector_detect,
71 .destroy = armada_drm_connector_destroy,
72 .set_property = armada_drm_connector_set_property,
73};
74
75void armada_drm_encoder_prepare(struct drm_encoder *encoder)
76{
77 encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_OFF);
78}
79
80void armada_drm_encoder_commit(struct drm_encoder *encoder)
81{
82 encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_ON);
83}
84
85bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
86 const struct drm_display_mode *mode, struct drm_display_mode *adjusted)
87{
88 return true;
89}
90
91/* Shouldn't this be a generic helper function? */
92int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
93 struct drm_display_mode *mode)
94{
95 struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
96 int valid = MODE_BAD;
97
98 if (encoder) {
99 struct drm_encoder_slave *slave = to_encoder_slave(encoder);
100
101 valid = slave->slave_funcs->mode_valid(encoder, mode);
102 }
103 return valid;
104}
105
106int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
107 struct drm_property *property, uint64_t value)
108{
109 struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
110 int rc = -EINVAL;
111
112 if (encoder) {
113 struct drm_encoder_slave *slave = to_encoder_slave(encoder);
114
115 rc = slave->slave_funcs->set_property(encoder, conn, property,
116 value);
117 }
118 return rc;
119}
120
121int armada_output_create(struct drm_device *dev,
122 const struct armada_output_type *type, const void *data)
123{
124 struct armada_connector *dconn;
125 int ret;
126
127 dconn = kzalloc(sizeof(*dconn), GFP_KERNEL);
128 if (!dconn)
129 return -ENOMEM;
130
131 dconn->type = type;
132
133 ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs,
134 type->connector_type);
135 if (ret) {
136 DRM_ERROR("unable to init connector\n");
137 goto err_destroy_dconn;
138 }
139
140 ret = type->create(&dconn->conn, data);
141 if (ret)
142 goto err_conn;
143
144 ret = drm_sysfs_connector_add(&dconn->conn);
145 if (ret)
146 goto err_sysfs;
147
148 return 0;
149
150 err_sysfs:
151 if (dconn->conn.encoder)
152 dconn->conn.encoder->funcs->destroy(dconn->conn.encoder);
153 err_conn:
154 drm_connector_cleanup(&dconn->conn);
155 err_destroy_dconn:
156 kfree(dconn);
157 return ret;
158}
diff --git a/drivers/gpu/drm/armada/armada_output.h b/drivers/gpu/drm/armada/armada_output.h
new file mode 100644
index 000000000000..4126d43b5057
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_output.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
 8#ifndef ARMADA_CONNECTOR_H
 9#define ARMADA_CONNECTOR_H
10
11#define encoder_helper_funcs(encoder) \
12 ((struct drm_encoder_helper_funcs *)encoder->helper_private)
13
14struct armada_output_type {
15 int connector_type;
16 enum drm_connector_status (*detect)(struct drm_connector *, bool);
17 int (*create)(struct drm_connector *, const void *);
18 int (*set_property)(struct drm_connector *, struct drm_property *,
19 uint64_t);
20};
21
22struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn);
23
24void armada_drm_encoder_prepare(struct drm_encoder *encoder);
25void armada_drm_encoder_commit(struct drm_encoder *encoder);
26
27bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
28 const struct drm_display_mode *mode, struct drm_display_mode *adj);
29
30int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
31 struct drm_display_mode *mode);
32
33int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
34 struct drm_property *property, uint64_t value);
35
36int armada_output_create(struct drm_device *dev,
37 const struct armada_output_type *type, const void *data);
38
39#endif
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
new file mode 100644
index 000000000000..c5b06fdb459c
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -0,0 +1,477 @@
1/*
2 * Copyright (C) 2012 Russell King
3 * Rewritten from the dovefb driver, and Armada510 manuals.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <drm/drmP.h>
10#include "armada_crtc.h"
11#include "armada_drm.h"
12#include "armada_fb.h"
13#include "armada_gem.h"
14#include "armada_hw.h"
15#include <drm/armada_drm.h>
16#include "armada_ioctlP.h"
17
18struct armada_plane_properties {
19 uint32_t colorkey_yr;
20 uint32_t colorkey_ug;
21 uint32_t colorkey_vb;
22#define K2R(val) (((val) >> 0) & 0xff)
23#define K2G(val) (((val) >> 8) & 0xff)
24#define K2B(val) (((val) >> 16) & 0xff)
25 int16_t brightness;
26 uint16_t contrast;
27 uint16_t saturation;
28 uint32_t colorkey_mode;
29};
30
31struct armada_plane {
32 struct drm_plane base;
33 spinlock_t lock;
34 struct drm_framebuffer *old_fb;
35 uint32_t src_hw;
36 uint32_t dst_hw;
37 uint32_t dst_yx;
38 uint32_t ctrl0;
39 struct {
40 struct armada_vbl_event update;
41 struct armada_regs regs[13];
42 wait_queue_head_t wait;
43 } vbl;
44 struct armada_plane_properties prop;
45};
46#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
47
48
49static void
50armada_ovl_update_attr(struct armada_plane_properties *prop,
51 struct armada_crtc *dcrtc)
52{
53 writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
54 writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
55 writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);
56
57 writel_relaxed(prop->brightness << 16 | prop->contrast,
58 dcrtc->base + LCD_SPU_CONTRAST);
59 /* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
60 writel_relaxed(prop->saturation << 16,
61 dcrtc->base + LCD_SPU_SATURATION);
62 writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
63
64 spin_lock_irq(&dcrtc->irq_lock);
65 armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
66 CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
67 dcrtc->base + LCD_SPU_DMA_CTRL1);
68
69 armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
70 spin_unlock_irq(&dcrtc->irq_lock);
71}
72
73/* === Plane support === */
74static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
75{
76 struct armada_plane *dplane = data;
77 struct drm_framebuffer *fb;
78
79 armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
80
81 spin_lock(&dplane->lock);
82 fb = dplane->old_fb;
83 dplane->old_fb = NULL;
84 spin_unlock(&dplane->lock);
85
86 if (fb)
87 armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
88}
89
90static unsigned armada_limit(int start, unsigned size, unsigned max)
91{
92 int end = start + size;
93 if (end < 0)
94 return 0;
95 if (start < 0)
96 start = 0;
97 return (unsigned)end > max ? max - start : end - start;
98}
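/*
 * Example: with max == 1024, a plane at start == -100 with size == 200
 * is clamped to 100 visible pixels (start raised to 0, end == 100); at
 * start == 1000 with size == 200, end (1200) exceeds max and only
 * max - start == 24 pixels remain.
 */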
99
100static int
101armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
102 struct drm_framebuffer *fb,
103 int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
104 uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
105{
106 struct armada_plane *dplane = drm_to_armada_plane(plane);
107 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
108 uint32_t val, ctrl0;
109 unsigned idx = 0;
110 int ret;
111
112 crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
113 crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
114 ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
115 CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
116 CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
117
118 /* Does the position/size result in nothing to display? */
119 if (crtc_w == 0 || crtc_h == 0) {
120 ctrl0 &= ~CFG_DMA_ENA;
121 }
122
123 /*
124 * FIXME: if the starting point is off screen, we need to
125 * adjust src_x, src_y, src_w, src_h appropriately, and
126 * according to the scale.
127 */
128
129 if (!dcrtc->plane) {
130 dcrtc->plane = plane;
131 armada_ovl_update_attr(&dplane->prop, dcrtc);
132 }
133
134 /* FIXME: overlay on an interlaced display */
135 /* Just updating the position/size? */
136 if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
137 val = (src_h & 0xffff0000) | src_w >> 16;
138 dplane->src_hw = val;
139 writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
140 val = crtc_h << 16 | crtc_w;
141 dplane->dst_hw = val;
142 writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
143 val = crtc_y << 16 | crtc_x;
144 dplane->dst_yx = val;
145 writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
146 return 0;
147 } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
148 /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
149 armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
150 dcrtc->base + LCD_SPU_SRAM_PARA1);
151 }
152
153 ret = wait_event_timeout(dplane->vbl.wait,
154 list_empty(&dplane->vbl.update.node),
155 HZ/25);
156 if (ret < 0)
157 return ret;
158
159 if (plane->fb != fb) {
160 struct armada_gem_object *obj = drm_fb_obj(fb);
161 uint32_t sy, su, sv;
162
163 /*
164 * Take a reference on the new framebuffer - we want to
165 * hold on to it while the hardware is displaying it.
166 */
167 drm_framebuffer_reference(fb);
168
169 if (plane->fb) {
170 struct drm_framebuffer *older_fb;
171
172 spin_lock_irq(&dplane->lock);
173 older_fb = dplane->old_fb;
174 dplane->old_fb = plane->fb;
175 spin_unlock_irq(&dplane->lock);
176 if (older_fb)
177 armada_drm_queue_unref_work(dcrtc->crtc.dev,
178 older_fb);
179 }
180
181 src_y >>= 16;
182 src_x >>= 16;
183 sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
184 src_x * fb->bits_per_pixel / 8;
185 su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
186 src_x;
187 sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
188 src_x;
189
190 armada_reg_queue_set(dplane->vbl.regs, idx, sy,
191 LCD_SPU_DMA_START_ADDR_Y0);
192 armada_reg_queue_set(dplane->vbl.regs, idx, su,
193 LCD_SPU_DMA_START_ADDR_U0);
194 armada_reg_queue_set(dplane->vbl.regs, idx, sv,
195 LCD_SPU_DMA_START_ADDR_V0);
196 armada_reg_queue_set(dplane->vbl.regs, idx, sy,
197 LCD_SPU_DMA_START_ADDR_Y1);
198 armada_reg_queue_set(dplane->vbl.regs, idx, su,
199 LCD_SPU_DMA_START_ADDR_U1);
200 armada_reg_queue_set(dplane->vbl.regs, idx, sv,
201 LCD_SPU_DMA_START_ADDR_V1);
202
203 val = fb->pitches[0] << 16 | fb->pitches[0];
204 armada_reg_queue_set(dplane->vbl.regs, idx, val,
205 LCD_SPU_DMA_PITCH_YC);
206 val = fb->pitches[1] << 16 | fb->pitches[2];
207 armada_reg_queue_set(dplane->vbl.regs, idx, val,
208 LCD_SPU_DMA_PITCH_UV);
209 }
210
211 val = (src_h & 0xffff0000) | src_w >> 16;
212 if (dplane->src_hw != val) {
213 dplane->src_hw = val;
214 armada_reg_queue_set(dplane->vbl.regs, idx, val,
215 LCD_SPU_DMA_HPXL_VLN);
216 }
217 val = crtc_h << 16 | crtc_w;
218 if (dplane->dst_hw != val) {
219 dplane->dst_hw = val;
220 armada_reg_queue_set(dplane->vbl.regs, idx, val,
221 LCD_SPU_DZM_HPXL_VLN);
222 }
223 val = crtc_y << 16 | crtc_x;
224 if (dplane->dst_yx != val) {
225 dplane->dst_yx = val;
226 armada_reg_queue_set(dplane->vbl.regs, idx, val,
227 LCD_SPU_DMA_OVSA_HPXL_VLN);
228 }
229 if (dplane->ctrl0 != ctrl0) {
230 dplane->ctrl0 = ctrl0;
231 armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
232 CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
233 CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
234 CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
235 CFG_YUV2RGB) | CFG_DMA_ENA,
236 LCD_SPU_DMA_CTRL0);
237 }
238 if (idx) {
239 armada_reg_queue_end(dplane->vbl.regs, idx);
240 armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update);
241 }
242 return 0;
243}
244
245static int armada_plane_disable(struct drm_plane *plane)
246{
247 struct armada_plane *dplane = drm_to_armada_plane(plane);
248 struct drm_framebuffer *fb;
249 struct armada_crtc *dcrtc;
250
251 if (!dplane->base.crtc)
252 return 0;
253
254 dcrtc = drm_to_armada_crtc(dplane->base.crtc);
255 dcrtc->plane = NULL;
256
257 spin_lock_irq(&dcrtc->irq_lock);
258 armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update);
259 armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
260 dplane->ctrl0 = 0;
261 spin_unlock_irq(&dcrtc->irq_lock);
262
263 /* Power down the Y/U/V FIFOs */
264 armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0,
265 dcrtc->base + LCD_SPU_SRAM_PARA1);
266
267 if (plane->fb)
268 drm_framebuffer_unreference(plane->fb);
269
270 spin_lock_irq(&dplane->lock);
271 fb = dplane->old_fb;
272 dplane->old_fb = NULL;
273 spin_unlock_irq(&dplane->lock);
274 if (fb)
275 drm_framebuffer_unreference(fb);
276
277 return 0;
278}
279
280static void armada_plane_destroy(struct drm_plane *plane)
281{
282 kfree(plane);
283}
284
285static int armada_plane_set_property(struct drm_plane *plane,
286 struct drm_property *property, uint64_t val)
287{
288 struct armada_private *priv = plane->dev->dev_private;
289 struct armada_plane *dplane = drm_to_armada_plane(plane);
290 bool update_attr = false;
291
292 if (property == priv->colorkey_prop) {
293#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
294 dplane->prop.colorkey_yr = CCC(K2R(val));
295 dplane->prop.colorkey_ug = CCC(K2G(val));
296 dplane->prop.colorkey_vb = CCC(K2B(val));
297#undef CCC
298 update_attr = true;
299 } else if (property == priv->colorkey_min_prop) {
300 dplane->prop.colorkey_yr &= ~0x00ff0000;
301 dplane->prop.colorkey_yr |= K2R(val) << 16;
302 dplane->prop.colorkey_ug &= ~0x00ff0000;
303 dplane->prop.colorkey_ug |= K2G(val) << 16;
304 dplane->prop.colorkey_vb &= ~0x00ff0000;
305 dplane->prop.colorkey_vb |= K2B(val) << 16;
306 update_attr = true;
307 } else if (property == priv->colorkey_max_prop) {
308 dplane->prop.colorkey_yr &= ~0xff000000;
309 dplane->prop.colorkey_yr |= K2R(val) << 24;
310 dplane->prop.colorkey_ug &= ~0xff000000;
311 dplane->prop.colorkey_ug |= K2G(val) << 24;
312 dplane->prop.colorkey_vb &= ~0xff000000;
313 dplane->prop.colorkey_vb |= K2B(val) << 24;
314 update_attr = true;
315 } else if (property == priv->colorkey_val_prop) {
316 dplane->prop.colorkey_yr &= ~0x0000ff00;
317 dplane->prop.colorkey_yr |= K2R(val) << 8;
318 dplane->prop.colorkey_ug &= ~0x0000ff00;
319 dplane->prop.colorkey_ug |= K2G(val) << 8;
320 dplane->prop.colorkey_vb &= ~0x0000ff00;
321 dplane->prop.colorkey_vb |= K2B(val) << 8;
322 update_attr = true;
323 } else if (property == priv->colorkey_alpha_prop) {
324 dplane->prop.colorkey_yr &= ~0x000000ff;
325 dplane->prop.colorkey_yr |= K2R(val);
326 dplane->prop.colorkey_ug &= ~0x000000ff;
327 dplane->prop.colorkey_ug |= K2G(val);
328 dplane->prop.colorkey_vb &= ~0x000000ff;
329 dplane->prop.colorkey_vb |= K2B(val);
330 update_attr = true;
331 } else if (property == priv->colorkey_mode_prop) {
332 dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
333 dplane->prop.colorkey_mode |= CFG_CKMODE(val);
334 update_attr = true;
335 } else if (property == priv->brightness_prop) {
336 dplane->prop.brightness = val - 256;
337 update_attr = true;
338 } else if (property == priv->contrast_prop) {
339 dplane->prop.contrast = val;
340 update_attr = true;
341 } else if (property == priv->saturation_prop) {
342 dplane->prop.saturation = val;
343 update_attr = true;
344 }
345
346 if (update_attr && dplane->base.crtc)
347 armada_ovl_update_attr(&dplane->prop,
348 drm_to_armada_crtc(dplane->base.crtc));
349
350 return 0;
351}
352
353static const struct drm_plane_funcs armada_plane_funcs = {
354 .update_plane = armada_plane_update,
355 .disable_plane = armada_plane_disable,
356 .destroy = armada_plane_destroy,
357 .set_property = armada_plane_set_property,
358};
359
360static const uint32_t armada_formats[] = {
361 DRM_FORMAT_UYVY,
362 DRM_FORMAT_YUYV,
363 DRM_FORMAT_YUV420,
364 DRM_FORMAT_YVU420,
365 DRM_FORMAT_YUV422,
366 DRM_FORMAT_YVU422,
367 DRM_FORMAT_VYUY,
368 DRM_FORMAT_YVYU,
369 DRM_FORMAT_ARGB8888,
370 DRM_FORMAT_ABGR8888,
371 DRM_FORMAT_XRGB8888,
372 DRM_FORMAT_XBGR8888,
373 DRM_FORMAT_RGB888,
374 DRM_FORMAT_BGR888,
375 DRM_FORMAT_ARGB1555,
376 DRM_FORMAT_ABGR1555,
377 DRM_FORMAT_RGB565,
378 DRM_FORMAT_BGR565,
379};
380
381static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
382 { CKMODE_DISABLE, "disabled" },
383 { CKMODE_Y, "Y component" },
384 { CKMODE_U, "U component" },
385 { CKMODE_V, "V component" },
386 { CKMODE_RGB, "RGB" },
387 { CKMODE_R, "R component" },
388 { CKMODE_G, "G component" },
389 { CKMODE_B, "B component" },
390};
391
392static int armada_overlay_create_properties(struct drm_device *dev)
393{
394 struct armada_private *priv = dev->dev_private;
395
396 if (priv->colorkey_prop)
397 return 0;
398
399 priv->colorkey_prop = drm_property_create_range(dev, 0,
400 "colorkey", 0, 0xffffff);
401 priv->colorkey_min_prop = drm_property_create_range(dev, 0,
402 "colorkey_min", 0, 0xffffff);
403 priv->colorkey_max_prop = drm_property_create_range(dev, 0,
404 "colorkey_max", 0, 0xffffff);
405 priv->colorkey_val_prop = drm_property_create_range(dev, 0,
406 "colorkey_val", 0, 0xffffff);
407 priv->colorkey_alpha_prop = drm_property_create_range(dev, 0,
408 "colorkey_alpha", 0, 0xffffff);
409 priv->colorkey_mode_prop = drm_property_create_enum(dev, 0,
410 "colorkey_mode",
411 armada_drm_colorkey_enum_list,
412 ARRAY_SIZE(armada_drm_colorkey_enum_list));
413 priv->brightness_prop = drm_property_create_range(dev, 0,
414 "brightness", 0, 256 + 255);
415 priv->contrast_prop = drm_property_create_range(dev, 0,
416 "contrast", 0, 0x7fff);
417 priv->saturation_prop = drm_property_create_range(dev, 0,
418 "saturation", 0, 0x7fff);
419
420 if (!priv->colorkey_prop)
421 return -ENOMEM;
422
423 return 0;
424}
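/*
 * Userspace reaches these through the generic property ioctls; a sketch,
 * assuming the property and plane ids were discovered beforehand via
 * DRM_IOCTL_MODE_OBJ_GETPROPERTIES:
 *
 *	struct drm_mode_obj_set_property p = {
 *		.value = CKMODE_RGB,
 *		.prop_id = colorkey_mode_prop_id,
 *		.obj_id = plane_id,
 *		.obj_type = DRM_MODE_OBJECT_PLANE,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_OBJ_SETPROPERTY, &p);
 */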
425
426int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
427{
428 struct armada_private *priv = dev->dev_private;
429 struct drm_mode_object *mobj;
430 struct armada_plane *dplane;
431 int ret;
432
433 ret = armada_overlay_create_properties(dev);
434 if (ret)
435 return ret;
436
437 dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
438 if (!dplane)
439 return -ENOMEM;
440
441 spin_lock_init(&dplane->lock);
442 init_waitqueue_head(&dplane->vbl.wait);
443 armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl,
444 dplane);
445
 446	ret = drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs,
 447		       armada_formats, ARRAY_SIZE(armada_formats), false);
 448	if (ret) {
 449		kfree(dplane);
 450		return ret;
 451	}
448
449 dplane->prop.colorkey_yr = 0xfefefe00;
450 dplane->prop.colorkey_ug = 0x01010100;
451 dplane->prop.colorkey_vb = 0x01010100;
452 dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
453 dplane->prop.brightness = 0;
454 dplane->prop.contrast = 0x4000;
455 dplane->prop.saturation = 0x4000;
456
457 mobj = &dplane->base.base;
458 drm_object_attach_property(mobj, priv->colorkey_prop,
459 0x0101fe);
460 drm_object_attach_property(mobj, priv->colorkey_min_prop,
461 0x0101fe);
462 drm_object_attach_property(mobj, priv->colorkey_max_prop,
463 0x0101fe);
464 drm_object_attach_property(mobj, priv->colorkey_val_prop,
465 0x0101fe);
466 drm_object_attach_property(mobj, priv->colorkey_alpha_prop,
467 0x000000);
468 drm_object_attach_property(mobj, priv->colorkey_mode_prop,
469 CKMODE_RGB);
470 drm_object_attach_property(mobj, priv->brightness_prop, 256);
471 drm_object_attach_property(mobj, priv->contrast_prop,
472 dplane->prop.contrast);
473 drm_object_attach_property(mobj, priv->saturation_prop,
474 dplane->prop.saturation);
475
476 return 0;
477}
diff --git a/drivers/gpu/drm/armada/armada_slave.c b/drivers/gpu/drm/armada/armada_slave.c
new file mode 100644
index 000000000000..00d0facb42f3
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_slave.c
@@ -0,0 +1,139 @@
1/*
2 * Copyright (C) 2012 Russell King
3 * Rewritten from the dovefb driver, and Armada510 manuals.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <drm/drmP.h>
10#include <drm/drm_crtc_helper.h>
11#include <drm/drm_edid.h>
12#include <drm/drm_encoder_slave.h>
13#include "armada_drm.h"
14#include "armada_output.h"
15#include "armada_slave.h"
16
17static int armada_drm_slave_get_modes(struct drm_connector *conn)
18{
19 struct drm_encoder *enc = armada_drm_connector_encoder(conn);
20 int count = 0;
21
22 if (enc) {
23 struct drm_encoder_slave *slave = to_encoder_slave(enc);
24
25 count = slave->slave_funcs->get_modes(enc, conn);
26 }
27
28 return count;
29}
30
31static void armada_drm_slave_destroy(struct drm_encoder *enc)
32{
33 struct drm_encoder_slave *slave = to_encoder_slave(enc);
34 struct i2c_client *client = drm_i2c_encoder_get_client(enc);
35
36 if (slave->slave_funcs)
37 slave->slave_funcs->destroy(enc);
38 if (client)
39 i2c_put_adapter(client->adapter);
40
41 drm_encoder_cleanup(&slave->base);
42 kfree(slave);
43}
44
45static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = {
46 .destroy = armada_drm_slave_destroy,
47};
48
49static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = {
50 .get_modes = armada_drm_slave_get_modes,
51 .mode_valid = armada_drm_slave_encoder_mode_valid,
52 .best_encoder = armada_drm_connector_encoder,
53};
54
55static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = {
56 .dpms = drm_i2c_encoder_dpms,
57 .save = drm_i2c_encoder_save,
58 .restore = drm_i2c_encoder_restore,
59 .mode_fixup = drm_i2c_encoder_mode_fixup,
60 .prepare = drm_i2c_encoder_prepare,
61 .commit = drm_i2c_encoder_commit,
62 .mode_set = drm_i2c_encoder_mode_set,
63 .detect = drm_i2c_encoder_detect,
64};
65
66static int
67armada_drm_conn_slave_create(struct drm_connector *conn, const void *data)
68{
69 const struct armada_drm_slave_config *config = data;
70 struct drm_encoder_slave *slave;
71 struct i2c_adapter *adap;
72 int ret;
73
74 conn->interlace_allowed = config->interlace_allowed;
75 conn->doublescan_allowed = config->doublescan_allowed;
76 conn->polled = config->polled;
77
78 drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs);
79
80 slave = kzalloc(sizeof(*slave), GFP_KERNEL);
81 if (!slave)
82 return -ENOMEM;
83
84 slave->base.possible_crtcs = config->crtcs;
85
86 adap = i2c_get_adapter(config->i2c_adapter_id);
87 if (!adap) {
88 kfree(slave);
89 return -EPROBE_DEFER;
90 }
91
92 ret = drm_encoder_init(conn->dev, &slave->base,
93 &armada_drm_slave_encoder_funcs,
94 DRM_MODE_ENCODER_TMDS);
95 if (ret) {
96 DRM_ERROR("unable to init encoder\n");
97 i2c_put_adapter(adap);
98 kfree(slave);
99 return ret;
100 }
101
102 ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info);
103 i2c_put_adapter(adap);
104 if (ret) {
105 DRM_ERROR("unable to init encoder slave\n");
106 armada_drm_slave_destroy(&slave->base);
107 return ret;
108 }
109
110 drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers);
111
112 ret = slave->slave_funcs->create_resources(&slave->base, conn);
113 if (ret) {
114 armada_drm_slave_destroy(&slave->base);
115 return ret;
116 }
117
118 ret = drm_mode_connector_attach_encoder(conn, &slave->base);
119 if (ret) {
120 armada_drm_slave_destroy(&slave->base);
121 return ret;
122 }
123
124 conn->encoder = &slave->base;
125
126 return ret;
127}
128
129static const struct armada_output_type armada_drm_conn_slave = {
130 .connector_type = DRM_MODE_CONNECTOR_HDMIA,
131 .create = armada_drm_conn_slave_create,
132 .set_property = armada_drm_slave_encoder_set_property,
133};
134
135int armada_drm_connector_slave_create(struct drm_device *dev,
136 const struct armada_drm_slave_config *config)
137{
138 return armada_output_create(dev, &armada_drm_conn_slave, config);
139}
diff --git a/drivers/gpu/drm/armada/armada_slave.h b/drivers/gpu/drm/armada/armada_slave.h
new file mode 100644
index 000000000000..bf2374c96fc1
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_slave.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef ARMADA_SLAVE_H
9#define ARMADA_SLAVE_H
10
11#include <linux/i2c.h>
12#include <drm/drmP.h>
13
14struct armada_drm_slave_config {
15 int i2c_adapter_id;
16 uint32_t crtcs;
17 uint8_t polled;
18 bool interlace_allowed;
19 bool doublescan_allowed;
20 struct i2c_board_info info;
21};
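/*
 * A board-specific caller might describe an external HDMI transmitter
 * like this (hypothetical adapter id and i2c address):
 *
 *	static const struct armada_drm_slave_config hdmi_config = {
 *		.i2c_adapter_id = 0,
 *		.crtcs = 1 << 0,
 *		.polled = DRM_CONNECTOR_POLL_CONNECT |
 *			  DRM_CONNECTOR_POLL_DISCONNECT,
 *		.interlace_allowed = true,
 *		.info = { I2C_BOARD_INFO("tda998x", 0x70) },
 *	};
 *
 *	armada_drm_connector_slave_create(dev, &hdmi_config);
 */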
22
23int armada_drm_connector_slave_create(struct drm_device *dev,
24 const struct armada_drm_slave_config *);
25
26#endif
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index da4a51eae824..8a784c460c89 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -6,6 +6,7 @@ config DRM_AST
6	select FB_SYS_FILLRECT
7	select FB_SYS_IMAGEBLIT
8	select DRM_KMS_HELPER
9	select DRM_KMS_FB_HELPER
10	select DRM_TTM
11	help
12	Say yes for experimental AST GPU driver. Do not enable
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 32e270dc714e..5137f15dba19 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -211,7 +211,6 @@ static struct drm_driver driver = {
211	.minor = DRIVER_MINOR,
212	.patchlevel = DRIVER_PATCHLEVEL,
213
214	.gem_init_object = ast_gem_init_object,
215	.gem_free_object = ast_gem_free_object,
216	.dumb_create = ast_dumb_create,
217	.dumb_map_offset = ast_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 8492b68e873c..9833a1b1acc1 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -323,7 +323,6 @@ extern int ast_dumb_create(struct drm_file *file,
323			struct drm_device *dev,
324			struct drm_mode_create_dumb *args);
325
326extern int ast_gem_init_object(struct drm_gem_object *obj);
327extern void ast_gem_free_object(struct drm_gem_object *obj);
328extern int ast_dumb_mmap_offset(struct drm_file *file,
329			struct drm_device *dev,
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 7f6152d374ca..af0b868a9dfd 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,12 +449,6 @@ int ast_dumb_create(struct drm_file *file,
449	return 0;
450}
451
452int ast_gem_init_object(struct drm_gem_object *obj)
453{
454 BUG();
455 return 0;
456}
457
458void ast_bo_unref(struct ast_bo **bo)
459{
460	struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index bf67b22723f9..9864559e5fb9 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -5,6 +5,7 @@ config DRM_CIRRUS_QEMU
5	select FB_SYS_COPYAREA
6	select FB_SYS_IMAGEBLIT
7	select DRM_KMS_HELPER
8	select DRM_KMS_FB_HELPER
9	select DRM_TTM
10	help
11	This is a KMS driver for emulated cirrus device in qemu.
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 138364d91782..953fc8aea69c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -97,7 +97,6 @@ static struct drm_driver driver = {
97	.major = DRIVER_MAJOR,
98	.minor = DRIVER_MINOR,
99	.patchlevel = DRIVER_PATCHLEVEL,
100	.gem_init_object = cirrus_gem_init_object,
101	.gem_free_object = cirrus_gem_free_object,
102	.dumb_create = cirrus_dumb_create,
103	.dumb_map_offset = cirrus_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 9b0bb9184afd..b6aded73838b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -191,7 +191,6 @@ int cirrus_device_init(struct cirrus_device *cdev,
 			struct pci_dev *pdev,
 			uint32_t flags);
 void cirrus_device_fini(struct cirrus_device *cdev);
-int cirrus_gem_init_object(struct drm_gem_object *obj);
 void cirrus_gem_free_object(struct drm_gem_object *obj);
 int cirrus_dumb_mmap_offset(struct drm_file *file,
 			    struct drm_device *dev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index f130a533a512..78e76f24343d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,12 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
 	return 0;
 }
 
-int cirrus_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-	return 0;
-}
-
 void cirrus_bo_unref(struct cirrus_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
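
The ast and cirrus hunks above drop the same dead weight: a ->gem_init_object() hook whose entire body was BUG(). With the callback gone from the core (these stubs were among its last users), a driver's vtable simply omits the hook. A minimal sketch of the resulting driver struct, with hypothetical foo_ names that are not part of this patch:

	static struct drm_driver foo_driver = {
		/* no .gem_init_object: the core no longer calls it, so a
		 * dummy BUG() stub would be dead code */
		.gem_free_object = foo_gem_free_object,
		.dumb_create     = foo_dumb_create,
		.dumb_map_offset = foo_dumb_mmap_offset,
	};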
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 60685b21cc36..adabc3daaa5b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -494,13 +494,12 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
 
 int cirrus_vga_get_modes(struct drm_connector *connector)
 {
-	/* Just add a static list of modes */
-	drm_add_modes_noedid(connector, 640, 480);
-	drm_add_modes_noedid(connector, 800, 600);
-	drm_add_modes_noedid(connector, 1024, 768);
-	drm_add_modes_noedid(connector, 1280, 1024);
-
-	return 4;
+	int count;
+
+	/* Just add a static list of modes */
+	count = drm_add_modes_noedid(connector, 1280, 1024);
+	drm_set_preferred_mode(connector, 1024, 768);
+	return count;
 }
 
 static int cirrus_vga_mode_valid(struct drm_connector *connector,
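
The cirrus change trades a hand-rolled list of four modes for the generic helpers: drm_add_modes_noedid() fills the probed-mode list with every standard mode that fits in the given bounding box and returns how many it added, while the new drm_set_preferred_mode() (introduced in the drm_edid.c part of this series) tags one resolution as preferred. A sketch of the same pattern for another EDID-less connector, using hypothetical foo_ names:

	static int foo_get_modes(struct drm_connector *connector)
	{
		/* add all standard modes up to 1920x1080 ... */
		int count = drm_add_modes_noedid(connector, 1920, 1080);

		/* ... and mark 1280x720 as what userspace should pick */
		drm_set_preferred_mode(connector, 1280, 720);
		return count;
	}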
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 224ff965bcf7..a4b017b6849e 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -334,7 +334,6 @@ int drm_addctx(struct drm_device *dev, void *data,
 
 	mutex_lock(&dev->ctxlist_mutex);
 	list_add(&ctx_entry->head, &dev->ctxlist);
-	++dev->ctx_count;
 	mutex_unlock(&dev->ctxlist_mutex);
 
 	return 0;
@@ -432,7 +431,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
 			if (pos->handle == ctx->handle) {
 				list_del(&pos->head);
 				kfree(pos);
-				--dev->ctx_count;
 			}
 		}
 	}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bff2fa941f60..d6cf77c472e7 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -202,6 +202,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
 	{ DRM_MODE_CONNECTOR_TV, "TV" },
 	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
 	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
 };
 
 static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -211,6 +212,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
 	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
 	{ DRM_MODE_ENCODER_TVDAC, "TV" },
 	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+	{ DRM_MODE_ENCODER_DSI, "DSI" },
 };
 
 void drm_connector_ida_init(void)
@@ -1301,7 +1303,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
 }
 
 /**
- * drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
  * @out: drm_display_mode to return to the user
  * @in: drm_mode_modeinfo to use
  *
@@ -1317,6 +1319,9 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
 	if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
 		return -ERANGE;
 
+	if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
+		return -EINVAL;
+
 	out->clock = in->clock;
 	out->hdisplay = in->hdisplay;
 	out->hsync_start = in->hsync_start;
@@ -1552,7 +1557,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
 	obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
 				   DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -1579,6 +1584,19 @@ out:
 	return ret;
 }
 
+static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+					 const struct drm_file *file_priv)
+{
+	/*
+	 * If user-space hasn't configured the driver to expose the stereo 3D
+	 * modes, don't expose them.
+	 */
+	if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
+		return false;
+
+	return true;
+}
+
 /**
  * drm_mode_getconnector - get connector configuration
  * @dev: drm device for the ioctl
@@ -1623,7 +1641,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 	obj = drm_mode_object_find(dev, out_resp->connector_id,
 				   DRM_MODE_OBJECT_CONNECTOR);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	connector = obj_to_connector(obj);
@@ -1644,7 +1662,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
 	/* delayed so we get modes regardless of pre-fill_modes state */
 	list_for_each_entry(mode, &connector->modes, head)
-		mode_count++;
+		if (drm_mode_expose_to_userspace(mode, file_priv))
+			mode_count++;
 
 	out_resp->connector_id = connector->base.id;
 	out_resp->connector_type = connector->connector_type;
@@ -1666,6 +1685,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 		copied = 0;
 		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
 		list_for_each_entry(mode, &connector->modes, head) {
+			if (!drm_mode_expose_to_userspace(mode, file_priv))
+				continue;
+
 			drm_crtc_convert_to_umode(&u_mode, mode);
 			if (copy_to_user(mode_ptr + copied,
 					 &u_mode, sizeof(u_mode))) {
@@ -1735,7 +1757,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
 	obj = drm_mode_object_find(dev, enc_resp->encoder_id,
 				   DRM_MODE_OBJECT_ENCODER);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	encoder = obj_to_encoder(obj);
@@ -2040,6 +2062,45 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
 }
 EXPORT_SYMBOL(drm_mode_set_config_internal);
 
+/*
+ * Checks that the framebuffer is big enough for the CRTC viewport
+ * (x, y, hdisplay, vdisplay)
+ */
+static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+				   int x, int y,
+				   const struct drm_display_mode *mode,
+				   const struct drm_framebuffer *fb)
+
+{
+	int hdisplay, vdisplay;
+
+	hdisplay = mode->hdisplay;
+	vdisplay = mode->vdisplay;
+
+	if (drm_mode_is_stereo(mode)) {
+		struct drm_display_mode adjusted = *mode;
+
+		drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
+		hdisplay = adjusted.crtc_hdisplay;
+		vdisplay = adjusted.crtc_vdisplay;
+	}
+
+	if (crtc->invert_dimensions)
+		swap(hdisplay, vdisplay);
+
+	if (hdisplay > fb->width ||
+	    vdisplay > fb->height ||
+	    x > fb->width - hdisplay ||
+	    y > fb->height - vdisplay) {
+		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+			      fb->width, fb->height, hdisplay, vdisplay, x, y,
+			      crtc->invert_dimensions ? " (inverted)" : "");
+		return -ENOSPC;
+	}
+
+	return 0;
+}
+
 /**
  * drm_mode_setcrtc - set CRTC configuration
  * @dev: drm device for the ioctl
@@ -2080,14 +2141,13 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 				   DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
 	if (crtc_req->mode_valid) {
-		int hdisplay, vdisplay;
 		/* If we have a mode we need a framebuffer. */
 		/* If we pass -1, set the mode with the currently bound fb */
 		if (crtc_req->fb_id == -1) {
@@ -2104,7 +2164,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 			if (!fb) {
 				DRM_DEBUG_KMS("Unknown FB ID%d\n",
 						crtc_req->fb_id);
-				ret = -EINVAL;
+				ret = -ENOENT;
 				goto out;
 			}
 		}
@@ -2123,23 +2183,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 
 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
-		hdisplay = mode->hdisplay;
-		vdisplay = mode->vdisplay;
-
-		if (crtc->invert_dimensions)
-			swap(hdisplay, vdisplay);
-
-		if (hdisplay > fb->width ||
-		    vdisplay > fb->height ||
-		    crtc_req->x > fb->width - hdisplay ||
-		    crtc_req->y > fb->height - vdisplay) {
-			DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-				      fb->width, fb->height,
-				      hdisplay, vdisplay, crtc_req->x, crtc_req->y,
-				      crtc->invert_dimensions ? " (inverted)" : "");
-			ret = -ENOSPC;
+		ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
+					      mode, fb);
+		if (ret)
 			goto out;
-		}
+
 	}
 
 	if (crtc_req->count_connectors == 0 && mode) {
@@ -2184,7 +2232,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 			if (!obj) {
 				DRM_DEBUG_KMS("Connector id %d unknown\n",
 						out_id);
-				ret = -EINVAL;
+				ret = -ENOENT;
 				goto out;
 			}
 			connector = obj_to_connector(obj);
@@ -2232,7 +2280,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
 	obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
-		return -EINVAL;
+		return -ENOENT;
 	}
 	crtc = obj_to_crtc(obj);
 
@@ -2441,6 +2489,8 @@ static int format_check(const struct drm_mode_fb_cmd2 *r)
 	case DRM_FORMAT_YVU444:
 		return 0;
 	default:
+		DRM_DEBUG_KMS("invalid pixel format %s\n",
+			      drm_get_format_name(r->pixel_format));
 		return -EINVAL;
 	}
 }
@@ -2606,7 +2656,7 @@ fail_lookup:
 	mutex_unlock(&dev->mode_config.fb_lock);
 	mutex_unlock(&file_priv->fbs_lock);
 
-	return -EINVAL;
+	return -ENOENT;
 }
 
 /**
@@ -2634,7 +2684,7 @@ int drm_mode_getfb(struct drm_device *dev,
 
 	fb = drm_framebuffer_lookup(dev, r->fb_id);
 	if (!fb)
-		return -EINVAL;
+		return -ENOENT;
 
 	r->height = fb->height;
 	r->width = fb->width;
@@ -2679,7 +2729,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 
 	fb = drm_framebuffer_lookup(dev, r->fb_id);
 	if (!fb)
-		return -EINVAL;
+		return -ENOENT;
 
 	num_clips = r->num_clips;
 	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
@@ -3011,7 +3061,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto done;
 	}
 	property = obj_to_property(obj);
@@ -3140,7 +3190,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto done;
 	}
 	blob = obj_to_blob(obj);
@@ -3301,7 +3351,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
 
 	obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	if (!obj->properties) {
@@ -3354,8 +3404,10 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
 	drm_modeset_lock_all(dev);
 
 	arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
-	if (!arg_obj)
+	if (!arg_obj) {
+		ret = -ENOENT;
 		goto out;
+	}
 	if (!arg_obj->properties)
 		goto out;
 
@@ -3368,8 +3420,10 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
 
 	prop_obj = drm_mode_object_find(dev, arg->prop_id,
 					DRM_MODE_OBJECT_PROPERTY);
-	if (!prop_obj)
+	if (!prop_obj) {
+		ret = -ENOENT;
 		goto out;
+	}
 	property = obj_to_property(prop_obj);
 
 	if (!drm_property_change_is_valid(property, arg->value))
@@ -3454,7 +3508,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -3513,7 +3567,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -3556,7 +3610,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	struct drm_framebuffer *fb = NULL, *old_fb = NULL;
 	struct drm_pending_vblank_event *e = NULL;
 	unsigned long flags;
-	int hdisplay, vdisplay;
 	int ret = -EINVAL;
 
 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -3568,7 +3621,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 
 	obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj)
-		return -EINVAL;
+		return -ENOENT;
 	crtc = obj_to_crtc(obj);
 
 	mutex_lock(&crtc->mutex);
@@ -3585,25 +3638,14 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 		goto out;
 
 	fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
-	if (!fb)
+	if (!fb) {
+		ret = -ENOENT;
 		goto out;
+	}
 
-	hdisplay = crtc->mode.hdisplay;
-	vdisplay = crtc->mode.vdisplay;
-
-	if (crtc->invert_dimensions)
-		swap(hdisplay, vdisplay);
-
-	if (hdisplay > fb->width ||
-	    vdisplay > fb->height ||
-	    crtc->x > fb->width - hdisplay ||
-	    crtc->y > fb->height - vdisplay) {
-		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-			      fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
-			      crtc->invert_dimensions ? " (inverted)" : "");
-		ret = -ENOSPC;
+	ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+	if (ret)
 		goto out;
-	}
 
 	if (crtc->fb->pixel_format != fb->pixel_format) {
 		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
@@ -3788,7 +3830,8 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
 		*bpp = 32;
 		break;
 	default:
-		DRM_DEBUG_KMS("unsupported pixel format\n");
+		DRM_DEBUG_KMS("unsupported pixel format %s\n",
+			      drm_get_format_name(format));
 		*depth = 0;
 		*bpp = 0;
 		break;
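
The setcrtc and page-flip paths above now share drm_crtc_check_viewport(), so stereo doubling and invert_dimensions handling live in one place. The test itself is plain interval arithmetic; a standalone sketch of the same bounds check, stripped of DRM types (hypothetical helper, for illustration only):

	/* true when a hd x vd viewport placed at (x, y) fits inside a
	 * fbw x fbh framebuffer; mirrors the -ENOSPC test above */
	static bool viewport_fits(int x, int y, int hd, int vd,
				  int fbw, int fbh)
	{
		return hd <= fbw && vd <= fbh &&
		       x <= fbw - hd && y <= fbh - vd;
	}

For example, a 1920x1080 viewport at (0, 8) inside a 1920x1088 framebuffer passes, while the same viewport at (0, 16) exceeds the bottom edge and fails.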
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index c722c3b5404d..01361aba033b 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,10 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_edid.h>
 
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
+
 /**
  * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
  *						connector list
@@ -76,7 +80,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 {
 	struct drm_display_mode *mode;
 
-	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
+		      DRM_MODE_FLAG_3D_MASK))
 		return;
 
 	list_for_each_entry(mode, &connector->modes, head) {
@@ -86,6 +91,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 		if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
 		    !(flags & DRM_MODE_FLAG_DBLSCAN))
 			mode->status = MODE_NO_DBLESCAN;
+		if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
+		    !(flags & DRM_MODE_FLAG_3D_MASK))
+			mode->status = MODE_NO_STEREO;
 	}
 
 	return;
@@ -105,9 +113,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
  * then culled (based on validity and the @maxX, @maxY parameters) and put into
  * the normal modes list.
  *
- * Intended to be use as a generic implementation of the ->probe() @connector
- * callback for drivers that use the crtc helpers for output mode filtering and
- * detection.
+ * Intended to be use as a generic implementation of the ->fill_modes()
+ * @connector vfunc for drivers that use the crtc helpers for output mode
+ * filtering and detection.
  *
  * RETURNS:
  * Number of modes found on @connector.
@@ -175,6 +183,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 		mode_flags |= DRM_MODE_FLAG_INTERLACE;
 	if (connector->doublescan_allowed)
 		mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+	if (connector->stereo_allowed)
+		mode_flags |= DRM_MODE_FLAG_3D_MASK;
 	drm_mode_validate_flag(connector, mode_flags);
 
 	list_for_each_entry(mode, &connector->modes, head) {
@@ -395,22 +405,25 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 			      struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+	struct drm_display_mode *adjusted_mode, saved_mode;
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	int saved_x, saved_y;
+	bool saved_enabled;
 	struct drm_encoder *encoder;
 	bool ret = true;
 
+	saved_enabled = crtc->enabled;
 	crtc->enabled = drm_helper_crtc_in_use(crtc);
 	if (!crtc->enabled)
 		return true;
 
 	adjusted_mode = drm_mode_duplicate(dev, mode);
-	if (!adjusted_mode)
+	if (!adjusted_mode) {
+		crtc->enabled = saved_enabled;
 		return false;
+	}
 
-	saved_hwmode = crtc->hwmode;
 	saved_mode = crtc->mode;
 	saved_x = crtc->x;
 	saved_y = crtc->y;
@@ -529,7 +542,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 done:
 	drm_mode_destroy(dev, adjusted_mode);
 	if (!ret) {
-		crtc->hwmode = saved_hwmode;
+		crtc->enabled = saved_enabled;
 		crtc->mode = saved_mode;
 		crtc->x = saved_x;
 		crtc->y = saved_y;
@@ -557,6 +570,14 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
 			continue;
 
 		connector->encoder = NULL;
+
+		/*
+		 * drm_helper_disable_unused_functions() ought to be
+		 * doing this, but since we've decoupled the encoder
+		 * from the connector above, the required connection
+		 * between them is henceforth no longer available.
+		 */
+		connector->dpms = DRM_MODE_DPMS_OFF;
 		}
 	}
 
@@ -583,9 +604,8 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
 int drm_crtc_helper_set_config(struct drm_mode_set *set)
 {
 	struct drm_device *dev;
-	struct drm_crtc *save_crtcs, *new_crtc, *crtc;
+	struct drm_crtc *new_crtc;
 	struct drm_encoder *save_encoders, *new_encoder, *encoder;
-	struct drm_framebuffer *old_fb = NULL;
 	bool mode_changed = false; /* if true do a full mode set */
 	bool fb_changed = false; /* if true and !mode_changed just do a flip */
 	struct drm_connector *save_connectors, *connector;
@@ -621,38 +641,28 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 
 	dev = set->crtc->dev;
 
-	/* Allocate space for the backup of all (non-pointer) crtc, encoder and
-	 * connector data. */
-	save_crtcs = kzalloc(dev->mode_config.num_crtc *
-			     sizeof(struct drm_crtc), GFP_KERNEL);
-	if (!save_crtcs)
-		return -ENOMEM;
-
+	/*
+	 * Allocate space for the backup of all (non-pointer) encoder and
+	 * connector data.
+	 */
 	save_encoders = kzalloc(dev->mode_config.num_encoder *
 				sizeof(struct drm_encoder), GFP_KERNEL);
-	if (!save_encoders) {
-		kfree(save_crtcs);
+	if (!save_encoders)
 		return -ENOMEM;
-	}
 
 	save_connectors = kzalloc(dev->mode_config.num_connector *
 				  sizeof(struct drm_connector), GFP_KERNEL);
 	if (!save_connectors) {
-		kfree(save_crtcs);
 		kfree(save_encoders);
 		return -ENOMEM;
 	}
 
-	/* Copy data. Note that driver private data is not affected.
+	/*
+	 * Copy data. Note that driver private data is not affected.
 	 * Should anything bad happen only the expected state is
 	 * restored, not the drivers personal bookkeeping.
 	 */
 	count = 0;
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		save_crtcs[count++] = *crtc;
-	}
-
-	count = 0;
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		save_encoders[count++] = *encoder;
 	}
@@ -775,19 +785,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		mode_changed = true;
 
 	if (mode_changed) {
-		set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
-		if (set->crtc->enabled) {
+		if (drm_helper_crtc_in_use(set->crtc)) {
 			DRM_DEBUG_KMS("attempting to set mode from"
 					" userspace\n");
 			drm_mode_debug_printmodeline(set->mode);
-			old_fb = set->crtc->fb;
 			set->crtc->fb = set->fb;
 			if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
 						      set->x, set->y,
-						      old_fb)) {
+						      save_set.fb)) {
 				DRM_ERROR("failed to set mode on [CRTC:%d]\n",
 					  set->crtc->base.id);
-				set->crtc->fb = old_fb;
+				set->crtc->fb = save_set.fb;
 				ret = -EINVAL;
 				goto fail;
 			}
@@ -802,31 +810,24 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 	} else if (fb_changed) {
 		set->crtc->x = set->x;
 		set->crtc->y = set->y;
-
-		old_fb = set->crtc->fb;
-		if (set->crtc->fb != set->fb)
-			set->crtc->fb = set->fb;
+		set->crtc->fb = set->fb;
 		ret = crtc_funcs->mode_set_base(set->crtc,
-						set->x, set->y, old_fb);
+						set->x, set->y, save_set.fb);
 		if (ret != 0) {
-			set->crtc->fb = old_fb;
+			set->crtc->x = save_set.x;
+			set->crtc->y = save_set.y;
+			set->crtc->fb = save_set.fb;
 			goto fail;
 		}
 	}
 
 	kfree(save_connectors);
 	kfree(save_encoders);
-	kfree(save_crtcs);
 	return 0;
 
 fail:
 	/* Restore all previous data. */
 	count = 0;
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		*crtc = save_crtcs[count++];
-	}
-
-	count = 0;
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		*encoder = save_encoders[count++];
 	}
@@ -844,7 +845,6 @@ fail:
 
 	kfree(save_connectors);
 	kfree(save_encoders);
-	kfree(save_crtcs);
 	return ret;
 }
 EXPORT_SYMBOL(drm_crtc_helper_set_config);
@@ -1125,14 +1125,14 @@ void drm_kms_helper_poll_fini(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
-void drm_helper_hpd_irq_event(struct drm_device *dev)
+bool drm_helper_hpd_irq_event(struct drm_device *dev)
 {
 	struct drm_connector *connector;
 	enum drm_connector_status old_status;
 	bool changed = false;
 
 	if (!dev->mode_config.poll_enabled)
-		return;
+		return false;
 
 	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -1157,5 +1157,7 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 
 	if (changed)
 		drm_kms_helper_hotplug_event(dev);
+
+	return changed;
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
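
drm_helper_hpd_irq_event() returning bool lets a caller know whether any connector actually changed state, instead of firing blind. A sketch of a driver-side HPD interrupt thread using the new return value (hypothetical handler, assuming the usual threaded-IRQ setup):

	static irqreturn_t foo_hpd_irq_thread(int irq, void *arg)
	{
		struct drm_device *dev = arg;

		/* the helper re-detects all connectors and only sends the
		 * hotplug uevent (and returns true) on a real change */
		if (drm_helper_hpd_irq_event(dev))
			DRM_DEBUG_KMS("hpd: connector status changed\n");

		return IRQ_HANDLED;
	}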
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index a05087cf846d..b4b51d46f339 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -42,7 +42,7 @@
  * Initialization, etc.
  **************************************************/
 
-static struct drm_info_list drm_debugfs_list[] = {
+static const struct drm_info_list drm_debugfs_list[] = {
 	{"name", drm_name_info, 0},
 	{"vm", drm_vm_info, 0},
 	{"clients", drm_clients_info, 0},
@@ -84,7 +84,7 @@ static const struct file_operations drm_debugfs_fops = {
  * Create a given set of debugfs files represented by an array of
  * gdm_debugfs_lists in the given root directory.
  */
-int drm_debugfs_create_files(struct drm_info_list *files, int count,
+int drm_debugfs_create_files(const struct drm_info_list *files, int count,
 			     struct dentry *root, struct drm_minor *minor)
 {
 	struct drm_device *dev = minor->dev;
@@ -188,7 +188,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
  *
  * Remove all debugfs entries created by debugfs_init().
  */
-int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
 			     struct drm_minor *minor)
 {
 	struct list_head *pos, *q;
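
With drm_debugfs_create_files()/drm_debugfs_remove_files() taking const pointers, a driver's file table can live in rodata. A sketch under those assumptions (hypothetical foo_ driver; drm_info_list entries pair a name with a seq_file show callback):

	static int foo_info(struct seq_file *m, void *data)
	{
		seq_puts(m, "hello from foo\n");	/* trivial show callback */
		return 0;
	}

	static const struct drm_info_list foo_debugfs_list[] = {
		{ "foo_info", foo_info, 0 },
	};

	/* registration, e.g. from the driver's debugfs init hook:
	 * drm_debugfs_create_files(foo_debugfs_list,
	 *			    ARRAY_SIZE(foo_debugfs_list),
	 *			    minor->debugfs_root, minor); */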
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 89e196627160..9e978aae8972 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -228,12 +228,12 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
 EXPORT_SYMBOL(i2c_dp_aux_add_bus);
 
 /* Helpers for DP link training */
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
 {
 	return link_status[r - DP_LANE0_1_STATUS];
 }
 
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE],
 			     int lane)
 {
 	int i = DP_LANE0_1_STATUS + (lane >> 1);
@@ -242,7 +242,7 @@ static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
 	return (l >> s) & 0xf;
 }
 
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			  int lane_count)
 {
 	u8 lane_align;
@@ -262,7 +262,7 @@ bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_channel_eq_ok);
 
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			      int lane_count)
 {
 	int lane;
@@ -277,7 +277,7 @@ bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
 
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
 				     int lane)
 {
 	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -290,7 +290,7 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
 
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
 					  int lane)
 {
 	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -303,7 +303,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
 
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
 		udelay(100);
 	else
@@ -311,7 +311,7 @@ void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 }
 EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
 
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
 		udelay(400);
 	else
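
Constifying the link-status helpers means the DPCD snapshot a driver reads during link training can be passed around as const data. A hedged fragment of a training loop (foo_dp_read_link_status() is an assumed driver AUX-read wrapper, not a core function):

	u8 link_status[DP_LINK_STATUS_SIZE];

	foo_dp_read_link_status(encoder, link_status);	/* AUX read of the lane status regs */
	if (!drm_dp_clock_recovery_ok(link_status, lane_count)) {
		/* adjust voltage swing / pre-emphasis from the request
		 * fields and retry the clock-recovery phase */
	} else if (!drm_dp_channel_eq_ok(link_status, lane_count)) {
		/* fall back to channel-equalization retraining */
	}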
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index fe58d0833a11..d9137e49c4e8 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -69,6 +69,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -170,76 +171,6 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
 
-/**
- * drm_legacy_dev_reinit
- *
- * Reinitializes a legacy/ums drm device in it's lastclose function.
- */
-static void drm_legacy_dev_reinit(struct drm_device *dev)
-{
-	int i;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	atomic_set(&dev->ioctl_count, 0);
-	atomic_set(&dev->vma_count, 0);
-
-	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
-		atomic_set(&dev->counts[i], 0);
-
-	dev->sigdata.lock = NULL;
-
-	dev->context_flag = 0;
-	dev->last_context = 0;
-	dev->if_version = 0;
-}
-
-/**
- * Take down the DRM device.
- *
- * \param dev DRM device structure.
- *
- * Frees every resource in \p dev.
- *
- * \sa drm_device
- */
-int drm_lastclose(struct drm_device * dev)
-{
-	struct drm_vma_entry *vma, *vma_temp;
-
-	DRM_DEBUG("\n");
-
-	if (dev->driver->lastclose)
-		dev->driver->lastclose(dev);
-	DRM_DEBUG("driver lastclose completed\n");
-
-	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_irq_uninstall(dev);
-
-	mutex_lock(&dev->struct_mutex);
-
-	drm_agp_clear(dev);
-
-	drm_legacy_sg_cleanup(dev);
-
-	/* Clear vma list (only built for debugging) */
-	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
-		list_del(&vma->head);
-		kfree(vma);
-	}
-
-	drm_legacy_dma_takedown(dev);
-
-	dev->dev_mapping = NULL;
-	mutex_unlock(&dev->struct_mutex);
-
-	drm_legacy_dev_reinit(dev);
-
-	DRM_DEBUG("lastclose completed\n");
-	return 0;
-}
-
 /** File operations structure */
 static const struct file_operations drm_stub_fops = {
 	.owner = THIS_MODULE,
@@ -385,7 +316,6 @@ long drm_ioctl(struct file *filp,
 		return -ENODEV;
 
 	atomic_inc(&dev->ioctl_count);
-	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
 	++file_priv->ioctl_count;
 
 	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
@@ -473,7 +403,7 @@ long drm_ioctl(struct file *filp,
 
       err_i1:
 	if (!ioctl)
-		DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+		DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
 			  task_pid_nr(current),
 			  (long)old_encode_dev(file_priv->minor->device),
 			  file_priv->authenticated, cmd, nr);
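
DRM_IOCTL_SET_CLIENT_CAP is the userspace opt-in that drm_mode_expose_to_userspace() (in the drm_crtc.c hunks above) keys off: until a client sets the stereo capability, GETCONNECTOR quietly filters the 3D modes out. From userspace this is one libdrm call, assuming a libdrm new enough to carry drmSetClientCap() and DRM_CLIENT_CAP_STEREO_3D:

	#include <xf86drm.h>

	static int enable_stereo_modes(int fd)
	{
		/* after this succeeds, stereo 3D modes show up in
		 * drmModeGetConnector() results for this fd */
		return drmSetClientCap(fd, DRM_CLIENT_CAP_STEREO_3D, 1);
	}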
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 830f7501cb4d..fb7cf0e796f6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -458,6 +458,15 @@ static const struct drm_display_mode drm_dmt_modes[] = {
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 };
 
+/*
+ * These more or less come from the DMT spec. The 720x400 modes are
+ * inferred from historical 80x25 practice. The 640x480@67 and 832x624@75
+ * modes are old-school Mac modes. The EDID spec says the 1152x864@75 mode
+ * should be 1152x870, again for the Mac, but instead we use the x864 DMT
+ * mode.
+ *
+ * The DMT modes have been fact-checked; the rest are mild guesses.
+ */
 static const struct drm_display_mode edid_est_modes[] = {
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
 		   968, 1056, 0, 600, 601, 605, 628, 0,
@@ -560,7 +569,7 @@ static const struct minimode est3_modes[] = {
 	{ 1600, 1200, 75, 0 },
 	{ 1600, 1200, 85, 0 },
 	{ 1792, 1344, 60, 0 },
-	{ 1792, 1344, 85, 0 },
+	{ 1792, 1344, 75, 0 },
 	{ 1856, 1392, 60, 0 },
 	{ 1856, 1392, 75, 0 },
 	{ 1920, 1200, 60, 1 },
@@ -1264,6 +1273,18 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_get_edid);
 
+/**
+ * drm_edid_duplicate - duplicate an EDID and the extensions
+ * @edid: EDID to duplicate
+ *
+ * Return duplicate edid or NULL on allocation failure.
+ */
+struct edid *drm_edid_duplicate(const struct edid *edid)
+{
+	return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_edid_duplicate);
+
 /*** EDID parsing ***/
 
 /**
@@ -1308,7 +1329,7 @@ static u32 edid_get_quirks(struct edid *edid)
 }
 
 #define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
-#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))
+#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
 
 /**
  * edid_fixup_preferred - set preferred modes based on quirk list
@@ -1323,6 +1344,7 @@ static void edid_fixup_preferred(struct drm_connector *connector,
 {
 	struct drm_display_mode *t, *cur_mode, *preferred_mode;
 	int target_refresh = 0;
+	int cur_vrefresh, preferred_vrefresh;
 
 	if (list_empty(&connector->probed_modes))
 		return;
@@ -1345,10 +1367,14 @@ static void edid_fixup_preferred(struct drm_connector *connector,
 		if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
 			preferred_mode = cur_mode;
 
+		cur_vrefresh = cur_mode->vrefresh ?
+			cur_mode->vrefresh : drm_mode_vrefresh(cur_mode);
+		preferred_vrefresh = preferred_mode->vrefresh ?
+			preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode);
 		/* At a given size, try to get closest to target refresh */
 		if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
-		    MODE_REFRESH_DIFF(cur_mode, target_refresh) <
-		    MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+		    MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) <
+		    MODE_REFRESH_DIFF(preferred_vrefresh, target_refresh)) {
 			preferred_mode = cur_mode;
 		}
 	}
@@ -2068,7 +2094,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
 	u8 *est = ((u8 *)timing) + 5;
 
 	for (i = 0; i < 6; i++) {
-		for (j = 7; j > 0; j--) {
+		for (j = 7; j >= 0; j--) {
 			m = (i * 8) + (7 - j);
 			if (m >= ARRAY_SIZE(est3_modes))
 				break;
@@ -2404,7 +2430,7 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks(to_match, cea_mode))
+		    drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
 			return mode + 1;
 	}
 	return 0;
@@ -2453,7 +2479,7 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks(to_match, hdmi_mode))
+		    drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
 			return mode + 1;
 	}
 	return 0;
@@ -2507,6 +2533,9 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
 		if (!newmode)
 			continue;
 
+		/* Carry over the stereo flags */
+		newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;
+
 		/*
 		 * The current mode could be either variant. Make
 		 * sure to pick the "other" clock for the new mode.
@@ -2553,20 +2582,151 @@ do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
 	return modes;
 }
 
+struct stereo_mandatory_mode {
+	int width, height, vrefresh;
+	unsigned int flags;
+};
+
+static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
+	{ 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
+	{ 1920, 1080, 50,
+	  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+	{ 1920, 1080, 60,
+	  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+	{ 1280, 720, 50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1280, 720, 50, DRM_MODE_FLAG_3D_FRAME_PACKING },
+	{ 1280, 720, 60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1280, 720, 60, DRM_MODE_FLAG_3D_FRAME_PACKING }
+};
+
+static bool
+stereo_match_mandatory(const struct drm_display_mode *mode,
+		       const struct stereo_mandatory_mode *stereo_mode)
+{
+	unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+	return mode->hdisplay == stereo_mode->width &&
+	       mode->vdisplay == stereo_mode->height &&
+	       interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+	       drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
+}
+
+static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	const struct drm_display_mode *mode;
+	struct list_head stereo_modes;
+	int modes = 0, i;
+
+	INIT_LIST_HEAD(&stereo_modes);
+
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
+			const struct stereo_mandatory_mode *mandatory;
+			struct drm_display_mode *new_mode;
+
+			if (!stereo_match_mandatory(mode,
+						    &stereo_mandatory_modes[i]))
+				continue;
+
+			mandatory = &stereo_mandatory_modes[i];
+			new_mode = drm_mode_duplicate(dev, mode);
+			if (!new_mode)
+				continue;
+
+			new_mode->flags |= mandatory->flags;
+			list_add_tail(&new_mode->head, &stereo_modes);
+			modes++;
+		}
+	}
+
+	list_splice_tail(&stereo_modes, &connector->probed_modes);
+
+	return modes;
+}
+
+static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *newmode;
+
+	vic--; /* VICs start at 1 */
+	if (vic >= ARRAY_SIZE(edid_4k_modes)) {
+		DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
+		return 0;
+	}
+
+	newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
+	if (!newmode)
+		return 0;
+
+	drm_mode_probed_add(connector, newmode);
+
+	return 1;
+}
+
+static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
+			       const u8 *video_db, u8 video_len, u8 video_index)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *newmode;
+	int modes = 0;
+	u8 cea_mode;
+
+	if (video_db == NULL || video_index > video_len)
+		return 0;
+
+	/* CEA modes are numbered 1..127 */
+	cea_mode = (video_db[video_index] & 127) - 1;
+	if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+		return 0;
+
+	if (structure & (1 << 0)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+	if (structure & (1 << 6)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+	if (structure & (1 << 8)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+
+	return modes;
+}
+
 /*
  * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
  * @connector: connector corresponding to the HDMI sink
  * @db: start of the CEA vendor specific block
  * @len: length of the CEA block payload, ie. one can access up to db[len]
  *
- * Parses the HDMI VSDB looking for modes to add to @connector.
+ * Parses the HDMI VSDB looking for modes to add to @connector. This function
+ * also adds the stereo 3d modes when applicable.
  */
 static int
-do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
+		   const u8 *video_db, u8 video_len)
 {
-	struct drm_device *dev = connector->dev;
-	int modes = 0, offset = 0, i;
-	u8 vic_len;
+	int modes = 0, offset = 0, i, multi_present = 0;
+	u8 vic_len, hdmi_3d_len = 0;
+	u16 mask;
+	u16 structure_all;
 
 	if (len < 8)
 		goto out;
@@ -2585,30 +2745,56 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
2585 2745
2586 /* the declared length is not long enough for the 2 first bytes 2746 /* the declared length is not long enough for the 2 first bytes
2587 * of additional video format capabilities */ 2747 * of additional video format capabilities */
2588 offset += 2; 2748 if (len < (8 + offset + 2))
2589 if (len < (8 + offset))
2590 goto out; 2749 goto out;
2591 2750
2751 /* 3D_Present */
2752 offset++;
2753 if (db[8 + offset] & (1 << 7)) {
2754 modes += add_hdmi_mandatory_stereo_modes(connector);
2755
2756 /* 3D_Multi_present */
2757 multi_present = (db[8 + offset] & 0x60) >> 5;
2758 }
2759
2760 offset++;
2592 vic_len = db[8 + offset] >> 5; 2761 vic_len = db[8 + offset] >> 5;
2762 hdmi_3d_len = db[8 + offset] & 0x1f;
2593 2763
2594 for (i = 0; i < vic_len && len >= (9 + offset + i); i++) { 2764 for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
2595 struct drm_display_mode *newmode;
2596 u8 vic; 2765 u8 vic;
2597 2766
2598 vic = db[9 + offset + i]; 2767 vic = db[9 + offset + i];
2768 modes += add_hdmi_mode(connector, vic);
2769 }
2770 offset += 1 + vic_len;
2599 2771
2600 vic--; /* VICs start at 1 */ 2772 if (!(multi_present == 1 || multi_present == 2))
2601 if (vic >= ARRAY_SIZE(edid_4k_modes)) { 2773 goto out;
2602 DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
2603 continue;
2604 }
2605 2774
2606 newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]); 2775 if ((multi_present == 1 && len < (9 + offset)) ||
2607 if (!newmode) 2776 (multi_present == 2 && len < (11 + offset)))
2608 continue; 2777 goto out;
2609 2778
2610 drm_mode_probed_add(connector, newmode); 2779 if ((multi_present == 1 && hdmi_3d_len < 2) ||
2611 modes++; 2780 (multi_present == 2 && hdmi_3d_len < 4))
2781 goto out;
2782
2783 /* 3D_Structure_ALL */
2784 structure_all = (db[8 + offset] << 8) | db[9 + offset];
2785
2786 /* check if 3D_MASK is present */
2787 if (multi_present == 2)
2788 mask = (db[10 + offset] << 8) | db[11 + offset];
2789 else
2790 mask = 0xffff;
2791
2792 for (i = 0; i < 16; i++) {
2793 if (mask & (1 << i))
2794 modes += add_3d_struct_modes(connector,
2795 structure_all,
2796 video_db,
2797 video_len, i);
2612 } 2798 }
2613 2799
2614out: 2800out:
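/*
 * Editor's sketch, not part of the patch: example field values under
 * the HDMI 1.4a bit layout assumed by the parser above.
 */
u16 structure_all = 0x0141;	/* bit 0: frame packing, bit 6:
				 * top-and-bottom, bit 8: side-by-side
				 * (half) supported */
u16 mask = 0x0003;		/* 3D_MASK: only the first two VICs of
				 * the video data block carry 3D */
/*
 * With these values the loop above calls add_3d_struct_modes() for
 * i = 0 and i = 1 only, adding the three advertised stereo layouts
 * for those two modes.
 */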
@@ -2668,8 +2854,8 @@ static int
2668add_cea_modes(struct drm_connector *connector, struct edid *edid) 2854add_cea_modes(struct drm_connector *connector, struct edid *edid)
2669{ 2855{
2670 const u8 *cea = drm_find_cea_extension(edid); 2856 const u8 *cea = drm_find_cea_extension(edid);
2671 const u8 *db; 2857 const u8 *db, *hdmi = NULL, *video = NULL;
2672 u8 dbl; 2858 u8 dbl, hdmi_len, video_len = 0;
2673 int modes = 0; 2859 int modes = 0;
2674 2860
2675 if (cea && cea_revision(cea) >= 3) { 2861 if (cea && cea_revision(cea) >= 3) {
@@ -2682,13 +2868,26 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
2682 db = &cea[i]; 2868 db = &cea[i];
2683 dbl = cea_db_payload_len(db); 2869 dbl = cea_db_payload_len(db);
2684 2870
2685 if (cea_db_tag(db) == VIDEO_BLOCK) 2871 if (cea_db_tag(db) == VIDEO_BLOCK) {
2686 modes += do_cea_modes(connector, db + 1, dbl); 2872 video = db + 1;
2687 else if (cea_db_is_hdmi_vsdb(db)) 2873 video_len = dbl;
2688 modes += do_hdmi_vsdb_modes(connector, db, dbl); 2874 modes += do_cea_modes(connector, video, dbl);
2875 }
2876 else if (cea_db_is_hdmi_vsdb(db)) {
2877 hdmi = db;
2878 hdmi_len = dbl;
2879 }
2689 } 2880 }
2690 } 2881 }
2691 2882
2883 /*
2884 * We parse the HDMI VSDB after having added the cea modes as we will
2885 * be patching their flags when the sink supports stereo 3D.
2886 */
2887 if (hdmi)
2888 modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video,
2889 video_len);
2890
2692 return modes; 2891 return modes;
2693} 2892}
2694 2893
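/*
 * Editor's note, not part of the patch: the ordering matters because
 * add_3d_struct_modes() is assumed to resolve indices into the video
 * data block parsed just above. For example, with video_db[0] == 16
 * (1080p60 per CEA-861), mask bit 0 set and 3D_Structure_ALL bit 0
 * set, the VSDB pass adds a copy of the already-probed 1080p60 mode
 * with DRM_MODE_FLAG_3D_FRAME_PACKING in its flags.
 */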
@@ -3288,6 +3487,19 @@ int drm_add_modes_noedid(struct drm_connector *connector,
3288} 3487}
3289EXPORT_SYMBOL(drm_add_modes_noedid); 3488EXPORT_SYMBOL(drm_add_modes_noedid);
3290 3489
3490void drm_set_preferred_mode(struct drm_connector *connector,
3491 int hpref, int vpref)
3492{
3493 struct drm_display_mode *mode;
3494
3495 list_for_each_entry(mode, &connector->probed_modes, head) {
3496 if (drm_mode_width(mode) == hpref &&
3497 drm_mode_height(mode) == vpref)
3498 mode->type |= DRM_MODE_TYPE_PREFERRED;
3499 }
3500}
3501EXPORT_SYMBOL(drm_set_preferred_mode);
3502
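/*
 * Usage sketch, editor's note (not part of the patch); the get_modes
 * helper name is hypothetical:
 */
static int hypothetical_get_modes(struct drm_connector *connector)
{
	int count = drm_add_modes_noedid(connector, 1920, 1080);

	/* flag the native timing so userspace picks it by default */
	drm_set_preferred_mode(connector, 1920, 1080);
	return count;
}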
3291/** 3503/**
3292 * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with 3504 * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
3293 * data from a DRM display mode 3505 * data from a DRM display mode
@@ -3321,6 +3533,33 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
3321} 3533}
3322EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode); 3534EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
3323 3535
3536static enum hdmi_3d_structure
3537s3d_structure_from_display_mode(const struct drm_display_mode *mode)
3538{
3539 u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
3540
3541 switch (layout) {
3542 case DRM_MODE_FLAG_3D_FRAME_PACKING:
3543 return HDMI_3D_STRUCTURE_FRAME_PACKING;
3544 case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
3545 return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
3546 case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
3547 return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
3548 case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
3549 return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
3550 case DRM_MODE_FLAG_3D_L_DEPTH:
3551 return HDMI_3D_STRUCTURE_L_DEPTH;
3552 case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
3553 return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
3554 case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
3555 return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
3556 case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
3557 return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
3558 default:
3559 return HDMI_3D_STRUCTURE_INVALID;
3560 }
3561}
3562
3324/** 3563/**
3325 * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with 3564 * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
3326 * data from a DRM display mode 3565 * data from a DRM display mode
@@ -3338,20 +3577,29 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
3338 const struct drm_display_mode *mode) 3577 const struct drm_display_mode *mode)
3339{ 3578{
3340 int err; 3579 int err;
3580 u32 s3d_flags;
3341 u8 vic; 3581 u8 vic;
3342 3582
3343 if (!frame || !mode) 3583 if (!frame || !mode)
3344 return -EINVAL; 3584 return -EINVAL;
3345 3585
3346 vic = drm_match_hdmi_mode(mode); 3586 vic = drm_match_hdmi_mode(mode);
3347 if (!vic) 3587 s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
3588
3589 if (!vic && !s3d_flags)
3590 return -EINVAL;
3591
3592 if (vic && s3d_flags)
3348 return -EINVAL; 3593 return -EINVAL;
3349 3594
3350 err = hdmi_vendor_infoframe_init(frame); 3595 err = hdmi_vendor_infoframe_init(frame);
3351 if (err < 0) 3596 if (err < 0)
3352 return err; 3597 return err;
3353 3598
3354 frame->vic = vic; 3599 if (vic)
3600 frame->vic = vic;
3601 else
3602 frame->s3d_struct = s3d_structure_from_display_mode(mode);
3355 3603
3356 return 0; 3604 return 0;
3357} 3605}
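/*
 * Usage sketch, editor's note (not part of the patch): with this
 * change a driver fills the HDMI vendor infoframe the same way for 4k
 * VIC modes and for stereo 3D modes; "adjusted_mode" is a hypothetical
 * variable.
 */
struct hdmi_vendor_infoframe frame;

if (drm_hdmi_vendor_infoframe_from_display_mode(&frame, adjusted_mode) == 0) {
	/* either frame.vic or frame.s3d_struct is now set; pack the
	 * frame and program it into the encoder's infoframe buffer */
} else {
	/* the mode needs no HDMI vendor infoframe (neither a 4k VIC
	 * nor a stereo layout), or it would illegally need both */
}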
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 271b42bbfb72..9081172ef057 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -32,7 +32,7 @@ MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
32 "from built-in data or /lib/firmware instead. "); 32 "from built-in data or /lib/firmware instead. ");
33 33
34#define GENERIC_EDIDS 5 34#define GENERIC_EDIDS 5
35static char *generic_edid_name[GENERIC_EDIDS] = { 35static const char *generic_edid_name[GENERIC_EDIDS] = {
36 "edid/1024x768.bin", 36 "edid/1024x768.bin",
37 "edid/1280x1024.bin", 37 "edid/1280x1024.bin",
38 "edid/1600x1200.bin", 38 "edid/1600x1200.bin",
@@ -40,7 +40,7 @@ static char *generic_edid_name[GENERIC_EDIDS] = {
40 "edid/1920x1080.bin", 40 "edid/1920x1080.bin",
41}; 41};
42 42
43static u8 generic_edid[GENERIC_EDIDS][128] = { 43static const u8 generic_edid[GENERIC_EDIDS][128] = {
44 { 44 {
45 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 45 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
46 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 46 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -133,63 +133,68 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
133 }, 133 },
134}; 134};
135 135
136static int edid_size(const u8 *edid, int data_size)
137{
138 if (data_size < EDID_LENGTH)
139 return 0;
140
141 return (edid[0x7e] + 1) * EDID_LENGTH;
142}
143
136static u8 *edid_load(struct drm_connector *connector, const char *name, 144static u8 *edid_load(struct drm_connector *connector, const char *name,
137 const char *connector_name) 145 const char *connector_name)
138{ 146{
139 const struct firmware *fw; 147 const struct firmware *fw = NULL;
140 struct platform_device *pdev; 148 const u8 *fwdata;
141 u8 *fwdata = NULL, *edid, *new_edid; 149 u8 *edid;
142 int fwsize, expected; 150 int fwsize, builtin;
143 int builtin = 0, err = 0;
144 int i, valid_extensions = 0; 151 int i, valid_extensions = 0;
145 bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); 152 bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
146 153
147 pdev = platform_device_register_simple(connector_name, -1, NULL, 0); 154 builtin = 0;
148 if (IS_ERR(pdev)) { 155 for (i = 0; i < GENERIC_EDIDS; i++) {
149 DRM_ERROR("Failed to register EDID firmware platform device " 156 if (strcmp(name, generic_edid_name[i]) == 0) {
150 "for connector \"%s\"\n", connector_name);
151 err = -EINVAL;
152 goto out;
153 }
154
155 err = request_firmware(&fw, name, &pdev->dev);
156 platform_device_unregister(pdev);
157
158 if (err) {
159 i = 0;
160 while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
161 i++;
162 if (i < GENERIC_EDIDS) {
163 err = 0;
164 builtin = 1;
165 fwdata = generic_edid[i]; 157 fwdata = generic_edid[i];
166 fwsize = sizeof(generic_edid[i]); 158 fwsize = sizeof(generic_edid[i]);
159 builtin = 1;
160 break;
167 } 161 }
168 } 162 }
163 if (!builtin) {
164 struct platform_device *pdev;
165 int err;
169 166
170 if (err) { 167 pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
171 DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n", 168 if (IS_ERR(pdev)) {
172 name, err); 169 DRM_ERROR("Failed to register EDID firmware platform device "
173 goto out; 170 "for connector \"%s\"\n", connector_name);
174 } 171 return ERR_CAST(pdev);
172 }
173
174 err = request_firmware(&fw, name, &pdev->dev);
175 platform_device_unregister(pdev);
176 if (err) {
177 DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
178 name, err);
179 return ERR_PTR(err);
180 }
175 181
176 if (fwdata == NULL) { 182 fwdata = fw->data;
177 fwdata = (u8 *) fw->data;
178 fwsize = fw->size; 183 fwsize = fw->size;
179 } 184 }
180 185
181 expected = (fwdata[0x7e] + 1) * EDID_LENGTH; 186 if (edid_size(fwdata, fwsize) != fwsize) {
182 if (expected != fwsize) {
183 DRM_ERROR("Size of EDID firmware \"%s\" is invalid " 187 DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
184 "(expected %d, got %d)\n", name, expected, (int) fwsize); 188 "(expected %d, got %d\n", name,
185 err = -EINVAL; 189 edid_size(fwdata, fwsize), (int)fwsize);
186 goto relfw_out; 190 edid = ERR_PTR(-EINVAL);
191 goto out;
187 } 192 }
188 193
189 edid = kmemdup(fwdata, fwsize, GFP_KERNEL); 194 edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
190 if (edid == NULL) { 195 if (edid == NULL) {
191 err = -ENOMEM; 196 edid = ERR_PTR(-ENOMEM);
192 goto relfw_out; 197 goto out;
193 } 198 }
194 199
195 if (!drm_edid_block_valid(edid, 0, print_bad_edid)) { 200 if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
@@ -197,8 +202,8 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
197 DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ", 202 DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
198 name); 203 name);
199 kfree(edid); 204 kfree(edid);
200 err = -EINVAL; 205 edid = ERR_PTR(-EINVAL);
201 goto relfw_out; 206 goto out;
202 } 207 }
203 208
204 for (i = 1; i <= edid[0x7e]; i++) { 209 for (i = 1; i <= edid[0x7e]; i++) {
@@ -210,19 +215,18 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
210 } 215 }
211 216
212 if (valid_extensions != edid[0x7e]) { 217 if (valid_extensions != edid[0x7e]) {
218 u8 *new_edid;
219
213 edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions; 220 edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
214 DRM_INFO("Found %d valid extensions instead of %d in EDID data " 221 DRM_INFO("Found %d valid extensions instead of %d in EDID data "
215 "\"%s\" for connector \"%s\"\n", valid_extensions, 222 "\"%s\" for connector \"%s\"\n", valid_extensions,
216 edid[0x7e], name, connector_name); 223 edid[0x7e], name, connector_name);
217 edid[0x7e] = valid_extensions; 224 edid[0x7e] = valid_extensions;
225
218 new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, 226 new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
219 GFP_KERNEL); 227 GFP_KERNEL);
220 if (new_edid == NULL) { 228 if (new_edid)
221 err = -ENOMEM; 229 edid = new_edid;
222 kfree(edid);
223 goto relfw_out;
224 }
225 edid = new_edid;
226 } 230 }
227 231
228 DRM_INFO("Got %s EDID base block and %d extension%s from " 232 DRM_INFO("Got %s EDID base block and %d extension%s from "
@@ -230,13 +234,9 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
230 "external", valid_extensions, valid_extensions == 1 ? "" : "s", 234 "external", valid_extensions, valid_extensions == 1 ? "" : "s",
231 name, connector_name); 235 name, connector_name);
232 236
233relfw_out:
234 release_firmware(fw);
235
236out: 237out:
237 if (err) 238 if (fw)
238 return ERR_PTR(err); 239 release_firmware(fw);
239
240 return edid; 240 return edid;
241} 241}
242 242
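/*
 * Worked example, editor's note (not part of the patch): byte 0x7e of
 * the EDID base block counts extension blocks, so edid_size() yields
 *
 *	edid[0x7e] == 0  ->  (0 + 1) * 128 = 128 bytes
 *	edid[0x7e] == 1  ->  (1 + 1) * 128 = 256 bytes
 *
 * and edid_load() above now rejects any firmware blob whose file size
 * differs from that computed size.
 */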
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3d13ca6e257f..0a19401aff80 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -39,10 +39,6 @@
39#include <drm/drm_fb_helper.h> 39#include <drm/drm_fb_helper.h>
40#include <drm/drm_crtc_helper.h> 40#include <drm/drm_crtc_helper.h>
41 41
42MODULE_AUTHOR("David Airlie, Jesse Barnes");
43MODULE_DESCRIPTION("DRM KMS helper");
44MODULE_LICENSE("GPL and additional rights");
45
46static LIST_HEAD(kernel_fb_helper_list); 42static LIST_HEAD(kernel_fb_helper_list);
47 43
48/** 44/**
@@ -844,7 +840,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
844 struct drm_fb_helper *fb_helper = info->par; 840 struct drm_fb_helper *fb_helper = info->par;
845 struct drm_device *dev = fb_helper->dev; 841 struct drm_device *dev = fb_helper->dev;
846 struct drm_mode_set *modeset; 842 struct drm_mode_set *modeset;
847 struct drm_crtc *crtc;
848 int ret = 0; 843 int ret = 0;
849 int i; 844 int i;
850 845
@@ -855,8 +850,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
855 } 850 }
856 851
857 for (i = 0; i < fb_helper->crtc_count; i++) { 852 for (i = 0; i < fb_helper->crtc_count; i++) {
858 crtc = fb_helper->crtc_info[i].mode_set.crtc;
859
860 modeset = &fb_helper->crtc_info[i].mode_set; 853 modeset = &fb_helper->crtc_info[i].mode_set;
861 854
862 modeset->x = var->xoffset; 855 modeset->x = var->xoffset;
@@ -1352,7 +1345,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
1352 struct drm_connector *connector; 1345 struct drm_connector *connector;
1353 struct drm_connector_helper_funcs *connector_funcs; 1346 struct drm_connector_helper_funcs *connector_funcs;
1354 struct drm_encoder *encoder; 1347 struct drm_encoder *encoder;
1355 struct drm_fb_helper_crtc *best_crtc;
1356 int my_score, best_score, score; 1348 int my_score, best_score, score;
1357 struct drm_fb_helper_crtc **crtcs, *crtc; 1349 struct drm_fb_helper_crtc **crtcs, *crtc;
1358 struct drm_fb_helper_connector *fb_helper_conn; 1350 struct drm_fb_helper_connector *fb_helper_conn;
@@ -1364,7 +1356,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
1364 connector = fb_helper_conn->connector; 1356 connector = fb_helper_conn->connector;
1365 1357
1366 best_crtcs[n] = NULL; 1358 best_crtcs[n] = NULL;
1367 best_crtc = NULL;
1368 best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height); 1359 best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
1369 if (modes[n] == NULL) 1360 if (modes[n] == NULL)
1370 return best_score; 1361 return best_score;
@@ -1413,7 +1404,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
1413 score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1, 1404 score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
1414 width, height); 1405 width, height);
1415 if (score > best_score) { 1406 if (score > best_score) {
1416 best_crtc = crtc;
1417 best_score = score; 1407 best_score = score;
1418 memcpy(best_crtcs, crtcs, 1408 memcpy(best_crtcs, crtcs,
1419 dev->mode_config.num_connector * 1409 dev->mode_config.num_connector *
@@ -1580,8 +1570,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
1580int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) 1570int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1581{ 1571{
1582 struct drm_device *dev = fb_helper->dev; 1572 struct drm_device *dev = fb_helper->dev;
1583 int count = 0; 1573 u32 max_width, max_height;
1584 u32 max_width, max_height, bpp_sel;
1585 1574
1586 if (!fb_helper->fb) 1575 if (!fb_helper->fb)
1587 return 0; 1576 return 0;
@@ -1596,10 +1585,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1596 1585
1597 max_width = fb_helper->fb->width; 1586 max_width = fb_helper->fb->width;
1598 max_height = fb_helper->fb->height; 1587 max_height = fb_helper->fb->height;
1599 bpp_sel = fb_helper->fb->bits_per_pixel;
1600 1588
1601 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, 1589 drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
1602 max_height);
1603 mutex_unlock(&fb_helper->dev->mode_config.mutex); 1590 mutex_unlock(&fb_helper->dev->mode_config.mutex);
1604 1591
1605 drm_modeset_lock_all(dev); 1592 drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 22d14ecbd3ec..c5b929c3f77a 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -113,7 +113,6 @@ int drm_open(struct inode *inode, struct file *filp)
113 retcode = drm_open_helper(inode, filp, dev); 113 retcode = drm_open_helper(inode, filp, dev);
114 if (retcode) 114 if (retcode)
115 goto err_undo; 115 goto err_undo;
116 atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
117 if (need_setup) { 116 if (need_setup) {
118 retcode = drm_setup(dev); 117 retcode = drm_setup(dev);
119 if (retcode) 118 if (retcode)
@@ -235,7 +234,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
235 234
236 priv->ioctl_count = 0; 235 priv->ioctl_count = 0;
237 /* for compatibility root is always authenticated */ 236 /* for compatibility root is always authenticated */
238 priv->authenticated = capable(CAP_SYS_ADMIN); 237 priv->always_authenticated = capable(CAP_SYS_ADMIN);
238 priv->authenticated = priv->always_authenticated;
239 priv->lock_count = 0; 239 priv->lock_count = 0;
240 240
241 INIT_LIST_HEAD(&priv->lhead); 241 INIT_LIST_HEAD(&priv->lhead);
@@ -374,13 +374,80 @@ static void drm_events_release(struct drm_file *file_priv)
374 } 374 }
375 375
376 /* Remove unconsumed events */ 376 /* Remove unconsumed events */
377 list_for_each_entry_safe(e, et, &file_priv->event_list, link) 377 list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
378 list_del(&e->link);
378 e->destroy(e); 379 e->destroy(e);
380 }
379 381
380 spin_unlock_irqrestore(&dev->event_lock, flags); 382 spin_unlock_irqrestore(&dev->event_lock, flags);
381} 383}
382 384
383/** 385/**
386 * drm_legacy_dev_reinit
387 *
 388 * Reinitializes a legacy/ums drm device in its lastclose function.
389 */
390static void drm_legacy_dev_reinit(struct drm_device *dev)
391{
392 if (drm_core_check_feature(dev, DRIVER_MODESET))
393 return;
394
395 atomic_set(&dev->ioctl_count, 0);
396 atomic_set(&dev->vma_count, 0);
397
398 dev->sigdata.lock = NULL;
399
400 dev->context_flag = 0;
401 dev->last_context = 0;
402 dev->if_version = 0;
403}
404
405/**
406 * Take down the DRM device.
407 *
408 * \param dev DRM device structure.
409 *
410 * Frees every resource in \p dev.
411 *
412 * \sa drm_device
413 */
414int drm_lastclose(struct drm_device * dev)
415{
416 struct drm_vma_entry *vma, *vma_temp;
417
418 DRM_DEBUG("\n");
419
420 if (dev->driver->lastclose)
421 dev->driver->lastclose(dev);
422 DRM_DEBUG("driver lastclose completed\n");
423
424 if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
425 drm_irq_uninstall(dev);
426
427 mutex_lock(&dev->struct_mutex);
428
429 drm_agp_clear(dev);
430
431 drm_legacy_sg_cleanup(dev);
432
433 /* Clear vma list (only built for debugging) */
434 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
435 list_del(&vma->head);
436 kfree(vma);
437 }
438
439 drm_legacy_dma_takedown(dev);
440
441 dev->dev_mapping = NULL;
442 mutex_unlock(&dev->struct_mutex);
443
444 drm_legacy_dev_reinit(dev);
445
446 DRM_DEBUG("lastclose completed\n");
447 return 0;
448}
449
450/**
384 * Release file. 451 * Release file.
385 * 452 *
386 * \param inode device inode 453 * \param inode device inode
@@ -449,7 +516,6 @@ int drm_release(struct inode *inode, struct file *filp)
449 516
450 list_del(&pos->head); 517 list_del(&pos->head);
451 kfree(pos); 518 kfree(pos);
452 --dev->ctx_count;
453 } 519 }
454 } 520 }
455 } 521 }
@@ -463,7 +529,7 @@ int drm_release(struct inode *inode, struct file *filp)
463 list_for_each_entry(temp, &dev->filelist, lhead) { 529 list_for_each_entry(temp, &dev->filelist, lhead) {
464 if ((temp->master == file_priv->master) && 530 if ((temp->master == file_priv->master) &&
465 (temp != file_priv)) 531 (temp != file_priv))
466 temp->authenticated = 0; 532 temp->authenticated = temp->always_authenticated;
467 } 533 }
468 534
469 /** 535 /**
@@ -511,7 +577,6 @@ int drm_release(struct inode *inode, struct file *filp)
511 * End inline drm_release 577 * End inline drm_release
512 */ 578 */
513 579
514 atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
515 if (!--dev->open_count) { 580 if (!--dev->open_count) {
516 if (atomic_read(&dev->ioctl_count)) { 581 if (atomic_read(&dev->ioctl_count)) {
517 DRM_ERROR("Device busy: %d\n", 582 DRM_ERROR("Device busy: %d\n",
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 49293bdc972a..4761adedad2a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -160,35 +160,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
160} 160}
161EXPORT_SYMBOL(drm_gem_private_object_init); 161EXPORT_SYMBOL(drm_gem_private_object_init);
162 162
163/**
164 * Allocate a GEM object of the specified size with shmfs backing store
165 */
166struct drm_gem_object *
167drm_gem_object_alloc(struct drm_device *dev, size_t size)
168{
169 struct drm_gem_object *obj;
170
171 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
172 if (!obj)
173 goto free;
174
175 if (drm_gem_object_init(dev, obj, size) != 0)
176 goto free;
177
178 if (dev->driver->gem_init_object != NULL &&
179 dev->driver->gem_init_object(obj) != 0) {
180 goto fput;
181 }
182 return obj;
183fput:
184 /* Object_init mangles the global counters - readjust them. */
185 fput(obj->filp);
186free:
187 kfree(obj);
188 return NULL;
189}
190EXPORT_SYMBOL(drm_gem_object_alloc);
191
192static void 163static void
193drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) 164drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
194{ 165{
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index f7311162a61d..3d2e91c4d78e 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -67,7 +67,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
67{ 67{
68 int ret; 68 int ret;
69 struct drm_global_item *item = &glob[ref->global_type]; 69 struct drm_global_item *item = &glob[ref->global_type];
70 void *object;
71 70
72 mutex_lock(&item->mutex); 71 mutex_lock(&item->mutex);
73 if (item->refcount == 0) { 72 if (item->refcount == 0) {
@@ -85,7 +84,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
85 } 84 }
86 ++item->refcount; 85 ++item->refcount;
87 ref->object = item->object; 86 ref->object = item->object;
88 object = item->object;
89 mutex_unlock(&item->mutex); 87 mutex_unlock(&item->mutex);
90 return 0; 88 return 0;
91out_err: 89out_err:
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 53298320080b..7d5a152eeb02 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -163,13 +163,13 @@ int drm_vblank_info(struct seq_file *m, void *data)
163 mutex_lock(&dev->struct_mutex); 163 mutex_lock(&dev->struct_mutex);
164 for (crtc = 0; crtc < dev->num_crtcs; crtc++) { 164 for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
165 seq_printf(m, "CRTC %d enable: %d\n", 165 seq_printf(m, "CRTC %d enable: %d\n",
166 crtc, atomic_read(&dev->vblank_refcount[crtc])); 166 crtc, atomic_read(&dev->vblank[crtc].refcount));
167 seq_printf(m, "CRTC %d counter: %d\n", 167 seq_printf(m, "CRTC %d counter: %d\n",
168 crtc, drm_vblank_count(dev, crtc)); 168 crtc, drm_vblank_count(dev, crtc));
169 seq_printf(m, "CRTC %d last wait: %d\n", 169 seq_printf(m, "CRTC %d last wait: %d\n",
170 crtc, dev->last_vblank_wait[crtc]); 170 crtc, dev->vblank[crtc].last_wait);
171 seq_printf(m, "CRTC %d in modeset: %d\n", 171 seq_printf(m, "CRTC %d in modeset: %d\n",
172 crtc, dev->vblank_inmodeset[crtc]); 172 crtc, dev->vblank[crtc].inmodeset);
173 } 173 }
174 mutex_unlock(&dev->struct_mutex); 174 mutex_unlock(&dev->struct_mutex);
175 return 0; 175 return 0;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 07247e2855a2..dffc836144cc 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -303,6 +303,27 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
303} 303}
304 304
305/** 305/**
 306 * Set client capabilities
307 */
308int
309drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
310{
311 struct drm_set_client_cap *req = data;
312
313 switch (req->capability) {
314 case DRM_CLIENT_CAP_STEREO_3D:
315 if (req->value > 1)
316 return -EINVAL;
317 file_priv->stereo_allowed = req->value;
318 break;
319 default:
320 return -EINVAL;
321 }
322
323 return 0;
324}
325
326/**
306 * Setversion ioctl. 327 * Setversion ioctl.
307 * 328 *
308 * \param inode device inode. 329 * \param inode device inode.
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f92da0a32f0d..d80d95289e10 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -43,9 +43,8 @@
43#include <linux/export.h> 43#include <linux/export.h>
44 44
45/* Access macro for slots in vblank timestamp ringbuffer. */ 45/* Access macro for slots in vblank timestamp ringbuffer. */
46#define vblanktimestamp(dev, crtc, count) ( \ 46#define vblanktimestamp(dev, crtc, count) \
47 (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \ 47 ((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
48 ((count) % DRM_VBLANKTIME_RBSIZE)])
49 48
50/* Retry timestamp calculation up to 3 times to satisfy 49/* Retry timestamp calculation up to 3 times to satisfy
51 * drm_timestamp_precision before giving up. 50 * drm_timestamp_precision before giving up.
@@ -89,8 +88,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
89 */ 88 */
90static void clear_vblank_timestamps(struct drm_device *dev, int crtc) 89static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
91{ 90{
92 memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0, 91 memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
93 DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
94} 92}
95 93
96/* 94/*
@@ -115,7 +113,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
115 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 113 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
116 114
117 dev->driver->disable_vblank(dev, crtc); 115 dev->driver->disable_vblank(dev, crtc);
118 dev->vblank_enabled[crtc] = 0; 116 dev->vblank[crtc].enabled = false;
119 117
120 /* No further vblank irq's will be processed after 118 /* No further vblank irq's will be processed after
121 * this point. Get current hardware vblank count and 119 * this point. Get current hardware vblank count and
@@ -130,9 +128,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
130 * delayed gpu counter increment. 128 * delayed gpu counter increment.
131 */ 129 */
132 do { 130 do {
133 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); 131 dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
134 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); 132 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
135 } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc); 133 } while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
136 134
137 if (!count) 135 if (!count)
138 vblrc = 0; 136 vblrc = 0;
@@ -140,7 +138,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
140 /* Compute time difference to stored timestamp of last vblank 138 /* Compute time difference to stored timestamp of last vblank
141 * as updated by last invocation of drm_handle_vblank() in vblank irq. 139 * as updated by last invocation of drm_handle_vblank() in vblank irq.
142 */ 140 */
143 vblcount = atomic_read(&dev->_vblank_count[crtc]); 141 vblcount = atomic_read(&dev->vblank[crtc].count);
144 diff_ns = timeval_to_ns(&tvblank) - 142 diff_ns = timeval_to_ns(&tvblank) -
145 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); 143 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
146 144
@@ -157,7 +155,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
157 * hope for the best. 155 * hope for the best.
158 */ 156 */
159 if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { 157 if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
160 atomic_inc(&dev->_vblank_count[crtc]); 158 atomic_inc(&dev->vblank[crtc].count);
161 smp_mb__after_atomic_inc(); 159 smp_mb__after_atomic_inc();
162 } 160 }
163 161
@@ -178,8 +176,8 @@ static void vblank_disable_fn(unsigned long arg)
178 176
179 for (i = 0; i < dev->num_crtcs; i++) { 177 for (i = 0; i < dev->num_crtcs; i++) {
180 spin_lock_irqsave(&dev->vbl_lock, irqflags); 178 spin_lock_irqsave(&dev->vbl_lock, irqflags);
181 if (atomic_read(&dev->vblank_refcount[i]) == 0 && 179 if (atomic_read(&dev->vblank[i].refcount) == 0 &&
182 dev->vblank_enabled[i]) { 180 dev->vblank[i].enabled) {
183 DRM_DEBUG("disabling vblank on crtc %d\n", i); 181 DRM_DEBUG("disabling vblank on crtc %d\n", i);
184 vblank_disable_and_save(dev, i); 182 vblank_disable_and_save(dev, i);
185 } 183 }
@@ -197,14 +195,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
197 195
198 vblank_disable_fn((unsigned long)dev); 196 vblank_disable_fn((unsigned long)dev);
199 197
200 kfree(dev->vbl_queue); 198 kfree(dev->vblank);
201 kfree(dev->_vblank_count);
202 kfree(dev->vblank_refcount);
203 kfree(dev->vblank_enabled);
204 kfree(dev->last_vblank);
205 kfree(dev->last_vblank_wait);
206 kfree(dev->vblank_inmodeset);
207 kfree(dev->_vblank_time);
208 199
209 dev->num_crtcs = 0; 200 dev->num_crtcs = 0;
210} 201}
@@ -221,42 +212,14 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
221 212
222 dev->num_crtcs = num_crtcs; 213 dev->num_crtcs = num_crtcs;
223 214
224 dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs, 215 dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
225 GFP_KERNEL); 216 if (!dev->vblank)
226 if (!dev->vbl_queue)
227 goto err; 217 goto err;
228 218
229 dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL); 219 for (i = 0; i < num_crtcs; i++)
230 if (!dev->_vblank_count) 220 init_waitqueue_head(&dev->vblank[i].queue);
231 goto err;
232
233 dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
234 GFP_KERNEL);
235 if (!dev->vblank_refcount)
236 goto err;
237
238 dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
239 if (!dev->vblank_enabled)
240 goto err;
241 221
242 dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL); 222 DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
243 if (!dev->last_vblank)
244 goto err;
245
246 dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
247 if (!dev->last_vblank_wait)
248 goto err;
249
250 dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
251 if (!dev->vblank_inmodeset)
252 goto err;
253
254 dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
255 sizeof(struct timeval), GFP_KERNEL);
256 if (!dev->_vblank_time)
257 goto err;
258
259 DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
260 223
261 /* Driver specific high-precision vblank timestamping supported? */ 224 /* Driver specific high-precision vblank timestamping supported? */
262 if (dev->driver->get_vblank_timestamp) 225 if (dev->driver->get_vblank_timestamp)
@@ -264,14 +227,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
264 else 227 else
265 DRM_INFO("No driver support for vblank timestamp query.\n"); 228 DRM_INFO("No driver support for vblank timestamp query.\n");
266 229
267 /* Zero per-crtc vblank stuff */ 230 dev->vblank_disable_allowed = false;
268 for (i = 0; i < num_crtcs; i++) {
269 init_waitqueue_head(&dev->vbl_queue[i]);
270 atomic_set(&dev->_vblank_count[i], 0);
271 atomic_set(&dev->vblank_refcount[i], 0);
272 }
273 231
274 dev->vblank_disable_allowed = 0;
275 return 0; 232 return 0;
276 233
277err: 234err:
@@ -336,7 +293,7 @@ int drm_irq_install(struct drm_device *dev)
336 mutex_unlock(&dev->struct_mutex); 293 mutex_unlock(&dev->struct_mutex);
337 return -EBUSY; 294 return -EBUSY;
338 } 295 }
339 dev->irq_enabled = 1; 296 dev->irq_enabled = true;
340 mutex_unlock(&dev->struct_mutex); 297 mutex_unlock(&dev->struct_mutex);
341 298
342 DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); 299 DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
@@ -359,7 +316,7 @@ int drm_irq_install(struct drm_device *dev)
359 316
360 if (ret < 0) { 317 if (ret < 0) {
361 mutex_lock(&dev->struct_mutex); 318 mutex_lock(&dev->struct_mutex);
362 dev->irq_enabled = 0; 319 dev->irq_enabled = false;
363 mutex_unlock(&dev->struct_mutex); 320 mutex_unlock(&dev->struct_mutex);
364 return ret; 321 return ret;
365 } 322 }
@@ -373,7 +330,7 @@ int drm_irq_install(struct drm_device *dev)
373 330
374 if (ret < 0) { 331 if (ret < 0) {
375 mutex_lock(&dev->struct_mutex); 332 mutex_lock(&dev->struct_mutex);
376 dev->irq_enabled = 0; 333 dev->irq_enabled = false;
377 mutex_unlock(&dev->struct_mutex); 334 mutex_unlock(&dev->struct_mutex);
378 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 335 if (!drm_core_check_feature(dev, DRIVER_MODESET))
379 vga_client_register(dev->pdev, NULL, NULL, NULL); 336 vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -394,14 +351,15 @@ EXPORT_SYMBOL(drm_irq_install);
394int drm_irq_uninstall(struct drm_device *dev) 351int drm_irq_uninstall(struct drm_device *dev)
395{ 352{
396 unsigned long irqflags; 353 unsigned long irqflags;
397 int irq_enabled, i; 354 bool irq_enabled;
355 int i;
398 356
399 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 357 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
400 return -EINVAL; 358 return -EINVAL;
401 359
402 mutex_lock(&dev->struct_mutex); 360 mutex_lock(&dev->struct_mutex);
403 irq_enabled = dev->irq_enabled; 361 irq_enabled = dev->irq_enabled;
404 dev->irq_enabled = 0; 362 dev->irq_enabled = false;
405 mutex_unlock(&dev->struct_mutex); 363 mutex_unlock(&dev->struct_mutex);
406 364
407 /* 365 /*
@@ -410,9 +368,9 @@ int drm_irq_uninstall(struct drm_device *dev)
410 if (dev->num_crtcs) { 368 if (dev->num_crtcs) {
411 spin_lock_irqsave(&dev->vbl_lock, irqflags); 369 spin_lock_irqsave(&dev->vbl_lock, irqflags);
412 for (i = 0; i < dev->num_crtcs; i++) { 370 for (i = 0; i < dev->num_crtcs; i++) {
413 DRM_WAKEUP(&dev->vbl_queue[i]); 371 DRM_WAKEUP(&dev->vblank[i].queue);
414 dev->vblank_enabled[i] = 0; 372 dev->vblank[i].enabled = false;
415 dev->last_vblank[i] = 373 dev->vblank[i].last =
416 dev->driver->get_vblank_counter(dev, i); 374 dev->driver->get_vblank_counter(dev, i);
417 } 375 }
418 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 376 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -628,24 +586,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
628 * code gets preempted or delayed for some reason. 586 * code gets preempted or delayed for some reason.
629 */ 587 */
630 for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) { 588 for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
631 /* Disable preemption to make it very likely to 589 /*
632 * succeed in the first iteration even on PREEMPT_RT kernel. 590 * Get vertical and horizontal scanout position vpos, hpos,
591 * and bounding timestamps stime, etime, pre/post query.
633 */ 592 */
634 preempt_disable(); 593 vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos,
594 &hpos, &stime, &etime);
635 595
636 /* Get system timestamp before query. */ 596 /*
637 stime = ktime_get(); 597 * Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
638 598 * CLOCK_REALTIME is requested.
639 /* Get vertical and horizontal scanout pos. vpos, hpos. */ 599 */
640 vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
641
642 /* Get system timestamp after query. */
643 etime = ktime_get();
644 if (!drm_timestamp_monotonic) 600 if (!drm_timestamp_monotonic)
645 mono_time_offset = ktime_get_monotonic_offset(); 601 mono_time_offset = ktime_get_monotonic_offset();
646 602
647 preempt_enable();
648
649 /* Return as no-op if scanout query unsupported or failed. */ 603 /* Return as no-op if scanout query unsupported or failed. */
650 if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { 604 if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
651 DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n", 605 DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
@@ -653,6 +607,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
653 return -EIO; 607 return -EIO;
654 } 608 }
655 609
610 /* Compute uncertainty in timestamp of scanout position query. */
656 duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime); 611 duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
657 612
658 /* Accept result with < max_error nsecs timing uncertainty. */ 613 /* Accept result with < max_error nsecs timing uncertainty. */
@@ -795,7 +750,7 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
795 */ 750 */
796u32 drm_vblank_count(struct drm_device *dev, int crtc) 751u32 drm_vblank_count(struct drm_device *dev, int crtc)
797{ 752{
798 return atomic_read(&dev->_vblank_count[crtc]); 753 return atomic_read(&dev->vblank[crtc].count);
799} 754}
800EXPORT_SYMBOL(drm_vblank_count); 755EXPORT_SYMBOL(drm_vblank_count);
801 756
@@ -824,10 +779,10 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
824 * a seqlock. 779 * a seqlock.
825 */ 780 */
826 do { 781 do {
827 cur_vblank = atomic_read(&dev->_vblank_count[crtc]); 782 cur_vblank = atomic_read(&dev->vblank[crtc].count);
828 *vblanktime = vblanktimestamp(dev, crtc, cur_vblank); 783 *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
829 smp_rmb(); 784 smp_rmb();
830 } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc])); 785 } while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
831 786
832 return cur_vblank; 787 return cur_vblank;
833} 788}
@@ -914,12 +869,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
914 } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc)); 869 } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
915 870
916 /* Deal with counter wrap */ 871 /* Deal with counter wrap */
917 diff = cur_vblank - dev->last_vblank[crtc]; 872 diff = cur_vblank - dev->vblank[crtc].last;
918 if (cur_vblank < dev->last_vblank[crtc]) { 873 if (cur_vblank < dev->vblank[crtc].last) {
919 diff += dev->max_vblank_count; 874 diff += dev->max_vblank_count;
920 875
921 DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", 876 DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
922 crtc, dev->last_vblank[crtc], cur_vblank, diff); 877 crtc, dev->vblank[crtc].last, cur_vblank, diff);
923 } 878 }
924 879
925 DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", 880 DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
@@ -930,12 +885,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
930 * reinitialize delayed at next vblank interrupt in that case. 885 * reinitialize delayed at next vblank interrupt in that case.
931 */ 886 */
932 if (rc) { 887 if (rc) {
933 tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; 888 tslot = atomic_read(&dev->vblank[crtc].count) + diff;
934 vblanktimestamp(dev, crtc, tslot) = t_vblank; 889 vblanktimestamp(dev, crtc, tslot) = t_vblank;
935 } 890 }
936 891
937 smp_mb__before_atomic_inc(); 892 smp_mb__before_atomic_inc();
938 atomic_add(diff, &dev->_vblank_count[crtc]); 893 atomic_add(diff, &dev->vblank[crtc].count);
939 smp_mb__after_atomic_inc(); 894 smp_mb__after_atomic_inc();
940} 895}
941 896
@@ -957,9 +912,9 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
957 912
958 spin_lock_irqsave(&dev->vbl_lock, irqflags); 913 spin_lock_irqsave(&dev->vbl_lock, irqflags);
959 /* Going from 0->1 means we have to enable interrupts again */ 914 /* Going from 0->1 means we have to enable interrupts again */
960 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { 915 if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
961 spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); 916 spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
962 if (!dev->vblank_enabled[crtc]) { 917 if (!dev->vblank[crtc].enabled) {
963 /* Enable vblank irqs under vblank_time_lock protection. 918 /* Enable vblank irqs under vblank_time_lock protection.
964 * All vblank count & timestamp updates are held off 919 * All vblank count & timestamp updates are held off
965 * until we are done reinitializing master counter and 920 * until we are done reinitializing master counter and
@@ -970,16 +925,16 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
970 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", 925 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
971 crtc, ret); 926 crtc, ret);
972 if (ret) 927 if (ret)
973 atomic_dec(&dev->vblank_refcount[crtc]); 928 atomic_dec(&dev->vblank[crtc].refcount);
974 else { 929 else {
975 dev->vblank_enabled[crtc] = 1; 930 dev->vblank[crtc].enabled = true;
976 drm_update_vblank_count(dev, crtc); 931 drm_update_vblank_count(dev, crtc);
977 } 932 }
978 } 933 }
979 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); 934 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
980 } else { 935 } else {
981 if (!dev->vblank_enabled[crtc]) { 936 if (!dev->vblank[crtc].enabled) {
982 atomic_dec(&dev->vblank_refcount[crtc]); 937 atomic_dec(&dev->vblank[crtc].refcount);
983 ret = -EINVAL; 938 ret = -EINVAL;
984 } 939 }
985 } 940 }
@@ -999,10 +954,10 @@ EXPORT_SYMBOL(drm_vblank_get);
999 */ 954 */
1000void drm_vblank_put(struct drm_device *dev, int crtc) 955void drm_vblank_put(struct drm_device *dev, int crtc)
1001{ 956{
1002 BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0); 957 BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);
1003 958
1004 /* Last user schedules interrupt disable */ 959 /* Last user schedules interrupt disable */
1005 if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) && 960 if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
1006 (drm_vblank_offdelay > 0)) 961 (drm_vblank_offdelay > 0))
1007 mod_timer(&dev->vblank_disable_timer, 962 mod_timer(&dev->vblank_disable_timer,
1008 jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000)); 963 jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
@@ -1025,7 +980,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
1025 980
1026 spin_lock_irqsave(&dev->vbl_lock, irqflags); 981 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1027 vblank_disable_and_save(dev, crtc); 982 vblank_disable_and_save(dev, crtc);
1028 DRM_WAKEUP(&dev->vbl_queue[crtc]); 983 DRM_WAKEUP(&dev->vblank[crtc].queue);
1029 984
1030 /* Send any queued vblank events, lest the natives grow disquiet */ 985 /* Send any queued vblank events, lest the natives grow disquiet */
1031 seq = drm_vblank_count_and_time(dev, crtc, &now); 986 seq = drm_vblank_count_and_time(dev, crtc, &now);
@@ -1067,10 +1022,10 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
1067 * to avoid corrupting the count if multiple, mismatch calls occur), 1022 * to avoid corrupting the count if multiple, mismatch calls occur),
1068 * so that interrupts remain enabled in the interim. 1023 * so that interrupts remain enabled in the interim.
1069 */ 1024 */
1070 if (!dev->vblank_inmodeset[crtc]) { 1025 if (!dev->vblank[crtc].inmodeset) {
1071 dev->vblank_inmodeset[crtc] = 0x1; 1026 dev->vblank[crtc].inmodeset = 0x1;
1072 if (drm_vblank_get(dev, crtc) == 0) 1027 if (drm_vblank_get(dev, crtc) == 0)
1073 dev->vblank_inmodeset[crtc] |= 0x2; 1028 dev->vblank[crtc].inmodeset |= 0x2;
1074 } 1029 }
1075} 1030}
1076EXPORT_SYMBOL(drm_vblank_pre_modeset); 1031EXPORT_SYMBOL(drm_vblank_pre_modeset);
@@ -1083,15 +1038,15 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
1083 if (!dev->num_crtcs) 1038 if (!dev->num_crtcs)
1084 return; 1039 return;
1085 1040
1086 if (dev->vblank_inmodeset[crtc]) { 1041 if (dev->vblank[crtc].inmodeset) {
1087 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1042 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1088 dev->vblank_disable_allowed = 1; 1043 dev->vblank_disable_allowed = true;
1089 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1044 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1090 1045
1091 if (dev->vblank_inmodeset[crtc] & 0x2) 1046 if (dev->vblank[crtc].inmodeset & 0x2)
1092 drm_vblank_put(dev, crtc); 1047 drm_vblank_put(dev, crtc);
1093 1048
1094 dev->vblank_inmodeset[crtc] = 0; 1049 dev->vblank[crtc].inmodeset = 0;
1095 } 1050 }
1096} 1051}
1097EXPORT_SYMBOL(drm_vblank_post_modeset); 1052EXPORT_SYMBOL(drm_vblank_post_modeset);
@@ -1288,8 +1243,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1288 1243
1289 DRM_DEBUG("waiting on vblank count %d, crtc %d\n", 1244 DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
1290 vblwait->request.sequence, crtc); 1245 vblwait->request.sequence, crtc);
1291 dev->last_vblank_wait[crtc] = vblwait->request.sequence; 1246 dev->vblank[crtc].last_wait = vblwait->request.sequence;
1292 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, 1247 DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
1293 (((drm_vblank_count(dev, crtc) - 1248 (((drm_vblank_count(dev, crtc) -
1294 vblwait->request.sequence) <= (1 << 23)) || 1249 vblwait->request.sequence) <= (1 << 23)) ||
1295 !dev->irq_enabled)); 1250 !dev->irq_enabled));
@@ -1367,7 +1322,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1367 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 1322 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
1368 1323
1369 /* Vblank irq handling disabled. Nothing to do. */ 1324 /* Vblank irq handling disabled. Nothing to do. */
1370 if (!dev->vblank_enabled[crtc]) { 1325 if (!dev->vblank[crtc].enabled) {
1371 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 1326 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
1372 return false; 1327 return false;
1373 } 1328 }
@@ -1377,7 +1332,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1377 */ 1332 */
1378 1333
1379 /* Get current timestamp and count. */ 1334 /* Get current timestamp and count. */
1380 vblcount = atomic_read(&dev->_vblank_count[crtc]); 1335 vblcount = atomic_read(&dev->vblank[crtc].count);
1381 drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ); 1336 drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
1382 1337
1383 /* Compute time difference to timestamp of last vblank */ 1338 /* Compute time difference to timestamp of last vblank */
@@ -1401,14 +1356,14 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1401 * the timestamp computed above. 1356 * the timestamp computed above.
1402 */ 1357 */
1403 smp_mb__before_atomic_inc(); 1358 smp_mb__before_atomic_inc();
1404 atomic_inc(&dev->_vblank_count[crtc]); 1359 atomic_inc(&dev->vblank[crtc].count);
1405 smp_mb__after_atomic_inc(); 1360 smp_mb__after_atomic_inc();
1406 } else { 1361 } else {
1407 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", 1362 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
1408 crtc, (int) diff_ns); 1363 crtc, (int) diff_ns);
1409 } 1364 }
1410 1365
1411 DRM_WAKEUP(&dev->vbl_queue[crtc]); 1366 DRM_WAKEUP(&dev->vblank[crtc].queue);
1412 drm_handle_vblank_events(dev, crtc); 1367 drm_handle_vblank_events(dev, crtc);
1413 1368
1414 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 1369 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
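/*
 * Editor's sketch, not part of the shown hunks: the per-crtc structure
 * dev->vblank[] now points at, reconstructed from the accesses above;
 * exact field order and comments are an assumption.
 */
struct drm_vblank_crtc {
	wait_queue_head_t queue;	/* vblank wait queue */
	struct timeval time[DRM_VBLANKTIME_RBSIZE]; /* timestamp ring */
	atomic_t count;			/* vblank interrupt counter */
	atomic_t refcount;		/* users of vblank interrupts */
	u32 last;			/* hw counter at last disable */
	u32 last_wait;			/* last sequence waited upon */
	unsigned int inmodeset;		/* modeset-in-progress flags */
	bool enabled;			/* vblank irqs currently on */
};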
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index d752c96d6090..f6452682141b 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -86,7 +86,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
86 if (drm_lock_take(&master->lock, lock->context)) { 86 if (drm_lock_take(&master->lock, lock->context)) {
87 master->lock.file_priv = file_priv; 87 master->lock.file_priv = file_priv;
88 master->lock.lock_time = jiffies; 88 master->lock.lock_time = jiffies;
89 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
90 break; /* Got lock */ 89 break; /* Got lock */
91 } 90 }
92 91
@@ -157,8 +156,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
157 return -EINVAL; 156 return -EINVAL;
158 } 157 }
159 158
160 atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
161
162 if (drm_lock_free(&master->lock, lock->context)) { 159 if (drm_lock_free(&master->lock, lock->context)) {
163 /* FIXME: Should really bail out here. */ 160 /* FIXME: Should really bail out here. */
164 } 161 }
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index fc2adb62b757..85071a1c4547 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -707,18 +707,25 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
707/** 707/**
708 * drm_mode_set_crtcinfo - set CRTC modesetting parameters 708 * drm_mode_set_crtcinfo - set CRTC modesetting parameters
709 * @p: mode 709 * @p: mode
710 * @adjust_flags: unused? (FIXME) 710 * @adjust_flags: a combination of adjustment flags
711 * 711 *
712 * LOCKING: 712 * LOCKING:
713 * None. 713 * None.
714 * 714 *
715 * Setup the CRTC modesetting parameters for @p, adjusting if necessary. 715 * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
716 *
717 * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
718 * interlaced modes.
719 * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
 720 * buffers containing two eyes (only adjust the timings when needed, e.g. for
721 * "frame packing" or "side by side full").
716 */ 722 */
717void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags) 723void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
718{ 724{
719 if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN)) 725 if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
720 return; 726 return;
721 727
728 p->crtc_clock = p->clock;
722 p->crtc_hdisplay = p->hdisplay; 729 p->crtc_hdisplay = p->hdisplay;
723 p->crtc_hsync_start = p->hsync_start; 730 p->crtc_hsync_start = p->hsync_start;
724 p->crtc_hsync_end = p->hsync_end; 731 p->crtc_hsync_end = p->hsync_end;
@@ -752,6 +759,20 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
752 p->crtc_vtotal *= p->vscan; 759 p->crtc_vtotal *= p->vscan;
753 } 760 }
754 761
762 if (adjust_flags & CRTC_STEREO_DOUBLE) {
763 unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
764
765 switch (layout) {
766 case DRM_MODE_FLAG_3D_FRAME_PACKING:
767 p->crtc_clock *= 2;
768 p->crtc_vdisplay += p->crtc_vtotal;
769 p->crtc_vsync_start += p->crtc_vtotal;
770 p->crtc_vsync_end += p->crtc_vtotal;
771 p->crtc_vtotal += p->crtc_vtotal;
772 break;
773 }
774 }
775
755 p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay); 776 p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
756 p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal); 777 p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
757 p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay); 778 p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
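/*
 * Worked example, editor's note (not part of the patch): HDMI 1080p/24
 * frame packing. The base mode has crtc_vtotal = 1125 and a 74.25 MHz
 * clock, so the CRTC_STEREO_DOUBLE case above produces
 *
 *	crtc_clock    = 2 * 74250   = 148500 kHz
 *	crtc_vdisplay = 1080 + 1125 = 2205 active lines
 *	crtc_vtotal   = 1125 + 1125 = 2250 total lines
 *
 * matching the frame-packed timing defined in HDMI 1.4a.
 */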
@@ -830,12 +851,16 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
830 } else if (mode1->clock != mode2->clock) 851 } else if (mode1->clock != mode2->clock)
831 return false; 852 return false;
832 853
833 return drm_mode_equal_no_clocks(mode1, mode2); 854 if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
855 (mode2->flags & DRM_MODE_FLAG_3D_MASK))
856 return false;
857
858 return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
834} 859}
835EXPORT_SYMBOL(drm_mode_equal); 860EXPORT_SYMBOL(drm_mode_equal);
836 861
837/** 862/**
838 * drm_mode_equal_no_clocks - test modes for equality 863 * drm_mode_equal_no_clocks_no_stereo - test modes for equality
839 * @mode1: first mode 864 * @mode1: first mode
840 * @mode2: second mode 865 * @mode2: second mode
841 * 866 *
@@ -843,12 +868,13 @@ EXPORT_SYMBOL(drm_mode_equal);
843 * None. 868 * None.
844 * 869 *
845 * Check to see if @mode1 and @mode2 are equivalent, but 870 * Check to see if @mode1 and @mode2 are equivalent, but
846 * don't check the pixel clocks. 871 * don't check the pixel clocks nor the stereo layout.
847 * 872 *
848 * RETURNS: 873 * RETURNS:
849 * True if the modes are equal, false otherwise. 874 * True if the modes are equal, false otherwise.
850 */ 875 */
851bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) 876bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
877 const struct drm_display_mode *mode2)
852{ 878{
853 if (mode1->hdisplay == mode2->hdisplay && 879 if (mode1->hdisplay == mode2->hdisplay &&
854 mode1->hsync_start == mode2->hsync_start && 880 mode1->hsync_start == mode2->hsync_start &&
@@ -860,12 +886,13 @@ bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct
860 mode1->vsync_end == mode2->vsync_end && 886 mode1->vsync_end == mode2->vsync_end &&
861 mode1->vtotal == mode2->vtotal && 887 mode1->vtotal == mode2->vtotal &&
862 mode1->vscan == mode2->vscan && 888 mode1->vscan == mode2->vscan &&
863 mode1->flags == mode2->flags) 889 (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
890 (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
864 return true; 891 return true;
865 892
866 return false; 893 return false;
867} 894}
868EXPORT_SYMBOL(drm_mode_equal_no_clocks); 895EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
869 896
870/** 897/**
871 * drm_mode_validate_size - make sure modes adhere to size constraints 898 * drm_mode_validate_size - make sure modes adhere to size constraints
@@ -1014,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
1014 /* if equal delete the probed mode */ 1041 /* if equal delete the probed mode */
1015 mode->status = pmode->status; 1042 mode->status = pmode->status;
1016 /* Merge type bits together */ 1043 /* Merge type bits together */
1017 mode->type |= pmode->type; 1044 mode->type = pmode->type;
1018 list_del(&pmode->head); 1045 list_del(&pmode->head);
1019 drm_mode_destroy(connector->dev, pmode); 1046 drm_mode_destroy(connector->dev, pmode);
1020 break; 1047 break;
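/*
 * Example, editor's note (not part of the patch): take two otherwise
 * identical 1080p modes where only mode2 carries
 * DRM_MODE_FLAG_3D_FRAME_PACKING. After this change:
 *
 *	drm_mode_equal(mode1, mode2)                     == false
 *	drm_mode_equal_no_clocks_no_stereo(mode1, mode2) == true
 *
 * which lets the EDID code find the probed 2D mode when attaching its
 * stereo variants while keeping 2D and 3D modes distinct elsewhere.
 */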
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 1f96cee6eee8..02679793c9e2 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -80,7 +80,7 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
80 /* Reserve */ 80 /* Reserve */
81 for (addr = (unsigned long)dmah->vaddr, sz = size; 81 for (addr = (unsigned long)dmah->vaddr, sz = size;
82 sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { 82 sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
83 SetPageReserved(virt_to_page(addr)); 83 SetPageReserved(virt_to_page((void *)addr));
84 } 84 }
85 85
86 return dmah; 86 return dmah;
@@ -103,7 +103,7 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
103 /* Unreserve */ 103 /* Unreserve */
104 for (addr = (unsigned long)dmah->vaddr, sz = dmah->size; 104 for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
105 sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { 105 sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
106 ClearPageReserved(virt_to_page(addr)); 106 ClearPageReserved(virt_to_page((void *)addr));
107 } 107 }
108 dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr, 108 dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
109 dmah->busaddr); 109 dmah->busaddr);
@@ -322,83 +322,36 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
322 322
323 DRM_DEBUG("\n"); 323 DRM_DEBUG("\n");
324 324
325 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 325 dev = drm_dev_alloc(driver, &pdev->dev);
326 if (!dev) 326 if (!dev)
327 return -ENOMEM; 327 return -ENOMEM;
328 328
329 ret = pci_enable_device(pdev); 329 ret = pci_enable_device(pdev);
330 if (ret) 330 if (ret)
331 goto err_g1; 331 goto err_free;
332 332
333 dev->pdev = pdev; 333 dev->pdev = pdev;
334 dev->dev = &pdev->dev;
335
336 dev->pci_device = pdev->device;
337 dev->pci_vendor = pdev->vendor;
338
339#ifdef __alpha__ 334#ifdef __alpha__
340 dev->hose = pdev->sysdata; 335 dev->hose = pdev->sysdata;
341#endif 336#endif
342 337
343 mutex_lock(&drm_global_mutex); 338 if (drm_core_check_feature(dev, DRIVER_MODESET))
344
345 if ((ret = drm_fill_in_dev(dev, ent, driver))) {
346 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
347 goto err_g2;
348 }
349
350 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
351 pci_set_drvdata(pdev, dev); 339 pci_set_drvdata(pdev, dev);
352 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
353 if (ret)
354 goto err_g2;
355 }
356
357 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
358 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
359 if (ret)
360 goto err_g21;
361 }
362
363 if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
364 goto err_g3;
365
366 if (dev->driver->load) {
367 ret = dev->driver->load(dev, ent->driver_data);
368 if (ret)
369 goto err_g4;
370 }
371 340
372 /* setup the grouping for the legacy output */ 341 ret = drm_dev_register(dev, ent->driver_data);
373 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 342 if (ret)
374 ret = drm_mode_group_init_legacy_group(dev, 343 goto err_pci;
375 &dev->primary->mode_group);
376 if (ret)
377 goto err_g4;
378 }
379
380 list_add_tail(&dev->driver_item, &driver->device_list);
381 344
382 DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", 345 DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
383 driver->name, driver->major, driver->minor, driver->patchlevel, 346 driver->name, driver->major, driver->minor, driver->patchlevel,
384 driver->date, pci_name(pdev), dev->primary->index); 347 driver->date, pci_name(pdev), dev->primary->index);
385 348
386 mutex_unlock(&drm_global_mutex);
387 return 0; 349 return 0;
388 350
389err_g4: 351err_pci:
390 drm_put_minor(&dev->primary);
391err_g3:
392 if (dev->render)
393 drm_put_minor(&dev->render);
394err_g21:
395 if (drm_core_check_feature(dev, DRIVER_MODESET))
396 drm_put_minor(&dev->control);
397err_g2:
398 pci_disable_device(pdev); 352 pci_disable_device(pdev);
399err_g1: 353err_free:
400 kfree(dev); 354 drm_dev_free(dev);
401 mutex_unlock(&drm_global_mutex);
402 return ret; 355 return ret;
403} 356}
404EXPORT_SYMBOL(drm_get_pci_dev); 357EXPORT_SYMBOL(drm_get_pci_dev);
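
The net effect of the drm_pci.c hunk above is that the PCI glue no longer
open-codes minor allocation, driver->load() and mode-group setup: it
allocates with drm_dev_alloc() and hands everything else to
drm_dev_register(), unwinding through a single drm_dev_free() on failure.
A minimal sketch of how a PCI DRM driver sits on top of the reworked
helper; the "my_" names are hypothetical:

    /* Sketch only: drm_get_pci_dev() now wraps drm_dev_alloc() +
     * drm_dev_register(), so a driver's probe callback reduces to one call. */
    static int my_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
    {
            return drm_get_pci_dev(pdev, ent, &my_drm_driver);
    }

    static void my_pci_remove(struct pci_dev *pdev)
    {
            struct drm_device *dev = pci_get_drvdata(pdev);

            drm_put_dev(dev);  /* drm_dev_unregister() + drm_dev_free() */
    }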
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index f7a18c6ba4c4..fc24fee8ec83 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -47,55 +47,15 @@ static int drm_get_platform_dev(struct platform_device *platdev,
47 47
48 DRM_DEBUG("\n"); 48 DRM_DEBUG("\n");
49 49
50 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 50 dev = drm_dev_alloc(driver, &platdev->dev);
51 if (!dev) 51 if (!dev)
52 return -ENOMEM; 52 return -ENOMEM;
53 53
54 dev->platformdev = platdev; 54 dev->platformdev = platdev;
55 dev->dev = &platdev->dev;
56 55
57 mutex_lock(&drm_global_mutex); 56 ret = drm_dev_register(dev, 0);
58
59 ret = drm_fill_in_dev(dev, NULL, driver);
60
61 if (ret) {
62 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
63 goto err_g1;
64 }
65
66 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
67 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
68 if (ret)
69 goto err_g1;
70 }
71
72 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
73 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
74 if (ret)
75 goto err_g11;
76 }
77
78 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
79 if (ret) 57 if (ret)
80 goto err_g2; 58 goto err_free;
81
82 if (dev->driver->load) {
83 ret = dev->driver->load(dev, 0);
84 if (ret)
85 goto err_g3;
86 }
87
88 /* setup the grouping for the legacy output */
89 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
90 ret = drm_mode_group_init_legacy_group(dev,
91 &dev->primary->mode_group);
92 if (ret)
93 goto err_g3;
94 }
95
96 list_add_tail(&dev->driver_item, &driver->device_list);
97
98 mutex_unlock(&drm_global_mutex);
99 59
100 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 60 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
101 driver->name, driver->major, driver->minor, driver->patchlevel, 61 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -103,17 +63,8 @@ static int drm_get_platform_dev(struct platform_device *platdev,
103 63
104 return 0; 64 return 0;
105 65
106err_g3: 66err_free:
107 drm_put_minor(&dev->primary); 67 drm_dev_free(dev);
108err_g2:
109 if (dev->render)
110 drm_put_minor(&dev->render);
111err_g11:
112 if (drm_core_check_feature(dev, DRIVER_MODESET))
113 drm_put_minor(&dev->control);
114err_g1:
115 kfree(dev);
116 mutex_unlock(&drm_global_mutex);
117 return ret; 68 return ret;
118} 69}
119 70
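
The platform path shrinks the same way: one err_free label replaces the
err_g1..err_g3 ladder, and the whole teardown is drm_dev_free(). A hedged
sketch of how a platform driver reaches drm_get_platform_dev(), assuming
the existing drm_platform_init() entry point ("my_" names hypothetical):

    /* Sketch: drm_platform_init() forwards to the drm_get_platform_dev()
     * shown above, which now does alloc + register in two calls. */
    static int my_platform_probe(struct platform_device *pdev)
    {
            return drm_platform_init(&my_drm_driver, pdev);
    }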
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 276d470f7b3e..56805c39c906 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -637,14 +637,13 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
637 unsigned count; 637 unsigned count;
638 struct scatterlist *sg; 638 struct scatterlist *sg;
639 struct page *page; 639 struct page *page;
640 u32 len, offset; 640 u32 len;
641 int pg_index; 641 int pg_index;
642 dma_addr_t addr; 642 dma_addr_t addr;
643 643
644 pg_index = 0; 644 pg_index = 0;
645 for_each_sg(sgt->sgl, sg, sgt->nents, count) { 645 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
646 len = sg->length; 646 len = sg->length;
647 offset = sg->offset;
648 page = sg_page(sg); 647 page = sg_page(sg);
649 addr = sg_dma_address(sg); 648 addr = sg_dma_address(sg);
650 649
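
The drm_prime.c hunk only removes a write-only "offset" local; the
page/DMA-address walk is unchanged. For reference, a hedged sketch of a
caller importing a dma-buf through this helper (names illustrative):

    /* Sketch: fill page and DMA-address arrays from an imported
     * sg_table; npages must bound both arrays. */
    static int my_fill_arrays(struct sg_table *sgt, struct page **pages,
                              dma_addr_t *addrs, int npages)
    {
            return drm_prime_sg_to_page_addr_arrays(sgt, pages, addrs,
                                                    npages);
    }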
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 39d864576be4..c200136a5d8e 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -254,81 +254,21 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
254 return 0; 254 return 0;
255} 255}
256 256
257int drm_fill_in_dev(struct drm_device *dev,
258 const struct pci_device_id *ent,
259 struct drm_driver *driver)
260{
261 int retcode;
262
263 INIT_LIST_HEAD(&dev->filelist);
264 INIT_LIST_HEAD(&dev->ctxlist);
265 INIT_LIST_HEAD(&dev->vmalist);
266 INIT_LIST_HEAD(&dev->maplist);
267 INIT_LIST_HEAD(&dev->vblank_event_list);
268
269 spin_lock_init(&dev->count_lock);
270 spin_lock_init(&dev->event_lock);
271 mutex_init(&dev->struct_mutex);
272 mutex_init(&dev->ctxlist_mutex);
273
274 if (drm_ht_create(&dev->map_hash, 12)) {
275 return -ENOMEM;
276 }
277
278 /* the DRM has 6 basic counters */
279 dev->counters = 6;
280 dev->types[0] = _DRM_STAT_LOCK;
281 dev->types[1] = _DRM_STAT_OPENS;
282 dev->types[2] = _DRM_STAT_CLOSES;
283 dev->types[3] = _DRM_STAT_IOCTLS;
284 dev->types[4] = _DRM_STAT_LOCKS;
285 dev->types[5] = _DRM_STAT_UNLOCKS;
286
287 dev->driver = driver;
288
289 if (dev->driver->bus->agp_init) {
290 retcode = dev->driver->bus->agp_init(dev);
291 if (retcode)
292 goto error_out_unreg;
293 }
294
295
296
297 retcode = drm_ctxbitmap_init(dev);
298 if (retcode) {
299 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
300 goto error_out_unreg;
301 }
302
303 if (driver->driver_features & DRIVER_GEM) {
304 retcode = drm_gem_init(dev);
305 if (retcode) {
306 DRM_ERROR("Cannot initialize graphics execution "
307 "manager (GEM)\n");
308 goto error_out_unreg;
309 }
310 }
311
312 return 0;
313
314 error_out_unreg:
315 drm_lastclose(dev);
316 return retcode;
317}
318EXPORT_SYMBOL(drm_fill_in_dev);
319
320
321/** 257/**
322 * Get a secondary minor number. 258 * drm_get_minor - Allocate and register new DRM minor
259 * @dev: DRM device
260 * @minor: Pointer to where new minor is stored
261 * @type: Type of minor
323 * 262 *
324 * \param dev device data structure 263 * Allocate a new minor of the given type and register it. A pointer to the new
325 * \param sec-minor structure to hold the assigned minor 264 * minor is returned in @minor.
326 * \return negative number on failure. 265 * Caller must hold the global DRM mutex.
327 * 266 *
328 * Search an empty entry and initialize it to the given parameters. This 267 * RETURNS:
329 * routines assigns minor numbers to secondary heads of multi-headed cards 268 * 0 on success, negative error code on failure.
330 */ 269 */
331int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) 270static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor,
271 int type)
332{ 272{
333 struct drm_minor *new_minor; 273 struct drm_minor *new_minor;
334 int ret; 274 int ret;
@@ -385,37 +325,48 @@ err_idr:
385 *minor = NULL; 325 *minor = NULL;
386 return ret; 326 return ret;
387} 327}
388EXPORT_SYMBOL(drm_get_minor);
389 328
390/** 329/**
391 * Put a secondary minor number. 330 * drm_unplug_minor - Unplug DRM minor
331 * @minor: Minor to unplug
392 * 332 *
393 * \param sec_minor - structure to be released 333 * Unplugs the given DRM minor but keeps the object. So after this returns,
394 * \return always zero 334 * minor->dev is still valid so existing open-files can still access it to get
 335 * device information from their drm_file objects.
336 * If the minor is already unplugged or if @minor is NULL, nothing is done.
337 * The global DRM mutex must be held by the caller.
395 */ 338 */
396int drm_put_minor(struct drm_minor **minor_p) 339static void drm_unplug_minor(struct drm_minor *minor)
397{ 340{
398 struct drm_minor *minor = *minor_p; 341 if (!minor || !device_is_registered(minor->kdev))
399 342 return;
400 DRM_DEBUG("release secondary minor %d\n", minor->index);
401 343
402#if defined(CONFIG_DEBUG_FS) 344#if defined(CONFIG_DEBUG_FS)
403 drm_debugfs_cleanup(minor); 345 drm_debugfs_cleanup(minor);
404#endif 346#endif
405 347
406 drm_sysfs_device_remove(minor); 348 drm_sysfs_device_remove(minor);
407
408 idr_remove(&drm_minors_idr, minor->index); 349 idr_remove(&drm_minors_idr, minor->index);
409
410 kfree(minor);
411 *minor_p = NULL;
412 return 0;
413} 350}
414EXPORT_SYMBOL(drm_put_minor);
415 351
416static void drm_unplug_minor(struct drm_minor *minor) 352/**
353 * drm_put_minor - Destroy DRM minor
354 * @minor: Minor to destroy
355 *
356 * This calls drm_unplug_minor() on the given minor and then frees it. Nothing
357 * is done if @minor is NULL. It is fine to call this on already unplugged
358 * minors.
359 * The global DRM mutex must be held by the caller.
360 */
361static void drm_put_minor(struct drm_minor *minor)
417{ 362{
418 drm_sysfs_device_remove(minor); 363 if (!minor)
364 return;
365
366 DRM_DEBUG("release secondary minor %d\n", minor->index);
367
368 drm_unplug_minor(minor);
369 kfree(minor);
419} 370}
420 371
421/** 372/**
@@ -427,66 +378,237 @@ static void drm_unplug_minor(struct drm_minor *minor)
427 */ 378 */
428void drm_put_dev(struct drm_device *dev) 379void drm_put_dev(struct drm_device *dev)
429{ 380{
430 struct drm_driver *driver;
431 struct drm_map_list *r_list, *list_temp;
432
433 DRM_DEBUG("\n"); 381 DRM_DEBUG("\n");
434 382
435 if (!dev) { 383 if (!dev) {
436 DRM_ERROR("cleanup called no dev\n"); 384 DRM_ERROR("cleanup called no dev\n");
437 return; 385 return;
438 } 386 }
439 driver = dev->driver;
440 387
441 drm_lastclose(dev); 388 drm_dev_unregister(dev);
389 drm_dev_free(dev);
390}
391EXPORT_SYMBOL(drm_put_dev);
442 392
443 if (dev->driver->unload) 393void drm_unplug_dev(struct drm_device *dev)
444 dev->driver->unload(dev); 394{
395 /* for a USB device */
396 if (drm_core_check_feature(dev, DRIVER_MODESET))
397 drm_unplug_minor(dev->control);
398 if (dev->render)
399 drm_unplug_minor(dev->render);
400 drm_unplug_minor(dev->primary);
445 401
446 if (dev->driver->bus->agp_destroy) 402 mutex_lock(&drm_global_mutex);
447 dev->driver->bus->agp_destroy(dev);
448 403
449 drm_vblank_cleanup(dev); 404 drm_device_set_unplugged(dev);
450 405
451 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) 406 if (dev->open_count == 0) {
452 drm_rmmap(dev, r_list->map); 407 drm_put_dev(dev);
453 drm_ht_remove(&dev->map_hash); 408 }
409 mutex_unlock(&drm_global_mutex);
410}
411EXPORT_SYMBOL(drm_unplug_dev);
454 412
455 drm_ctxbitmap_cleanup(dev); 413/**
414 * drm_dev_alloc - Allocate new drm device
415 * @driver: DRM driver to allocate device for
416 * @parent: Parent device object
417 *
418 * Allocate and initialize a new DRM device. No device registration is done.
 419 * Call drm_dev_register() to advertise the device to user space and register it
420 * with other core subsystems.
421 *
422 * RETURNS:
423 * Pointer to new DRM device, or NULL if out of memory.
424 */
425struct drm_device *drm_dev_alloc(struct drm_driver *driver,
426 struct device *parent)
427{
428 struct drm_device *dev;
429 int ret;
456 430
457 if (drm_core_check_feature(dev, DRIVER_MODESET)) 431 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
458 drm_put_minor(&dev->control); 432 if (!dev)
433 return NULL;
459 434
460 if (dev->render) 435 dev->dev = parent;
461 drm_put_minor(&dev->render); 436 dev->driver = driver;
462 437
463 if (driver->driver_features & DRIVER_GEM) 438 INIT_LIST_HEAD(&dev->filelist);
439 INIT_LIST_HEAD(&dev->ctxlist);
440 INIT_LIST_HEAD(&dev->vmalist);
441 INIT_LIST_HEAD(&dev->maplist);
442 INIT_LIST_HEAD(&dev->vblank_event_list);
443
444 spin_lock_init(&dev->count_lock);
445 spin_lock_init(&dev->event_lock);
446 mutex_init(&dev->struct_mutex);
447 mutex_init(&dev->ctxlist_mutex);
448
449 if (drm_ht_create(&dev->map_hash, 12))
450 goto err_free;
451
452 ret = drm_ctxbitmap_init(dev);
453 if (ret) {
454 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
455 goto err_ht;
456 }
457
458 if (driver->driver_features & DRIVER_GEM) {
459 ret = drm_gem_init(dev);
460 if (ret) {
461 DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
462 goto err_ctxbitmap;
463 }
464 }
465
466 return dev;
467
468err_ctxbitmap:
469 drm_ctxbitmap_cleanup(dev);
470err_ht:
471 drm_ht_remove(&dev->map_hash);
472err_free:
473 kfree(dev);
474 return NULL;
475}
476EXPORT_SYMBOL(drm_dev_alloc);
477
478/**
479 * drm_dev_free - Free DRM device
480 * @dev: DRM device to free
481 *
482 * Free a DRM device that has previously been allocated via drm_dev_alloc().
483 * You must not use kfree() instead or you will leak memory.
484 *
485 * This must not be called once the device got registered. Use drm_put_dev()
486 * instead, which then calls drm_dev_free().
487 */
488void drm_dev_free(struct drm_device *dev)
489{
490 drm_put_minor(dev->control);
491 drm_put_minor(dev->render);
492 drm_put_minor(dev->primary);
493
494 if (dev->driver->driver_features & DRIVER_GEM)
464 drm_gem_destroy(dev); 495 drm_gem_destroy(dev);
465 496
466 drm_put_minor(&dev->primary); 497 drm_ctxbitmap_cleanup(dev);
498 drm_ht_remove(&dev->map_hash);
467 499
468 list_del(&dev->driver_item);
469 kfree(dev->devname); 500 kfree(dev->devname);
470 kfree(dev); 501 kfree(dev);
471} 502}
472EXPORT_SYMBOL(drm_put_dev); 503EXPORT_SYMBOL(drm_dev_free);
473 504
474void drm_unplug_dev(struct drm_device *dev) 505/**
506 * drm_dev_register - Register DRM device
507 * @dev: Device to register
508 *
509 * Register the DRM device @dev with the system, advertise device to user-space
510 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
511 * previously.
512 *
513 * Never call this twice on any device!
514 *
515 * RETURNS:
516 * 0 on success, negative error code on failure.
517 */
518int drm_dev_register(struct drm_device *dev, unsigned long flags)
475{ 519{
476 /* for a USB device */ 520 int ret;
477 if (drm_core_check_feature(dev, DRIVER_MODESET))
478 drm_unplug_minor(dev->control);
479 if (dev->render)
480 drm_unplug_minor(dev->render);
481 drm_unplug_minor(dev->primary);
482 521
483 mutex_lock(&drm_global_mutex); 522 mutex_lock(&drm_global_mutex);
484 523
485 drm_device_set_unplugged(dev); 524 if (dev->driver->bus->agp_init) {
525 ret = dev->driver->bus->agp_init(dev);
526 if (ret)
527 goto out_unlock;
528 }
486 529
487 if (dev->open_count == 0) { 530 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
488 drm_put_dev(dev); 531 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
532 if (ret)
533 goto err_agp;
534 }
535
536 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
537 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
538 if (ret)
539 goto err_control_node;
540 }
541
542 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
543 if (ret)
544 goto err_render_node;
545
546 if (dev->driver->load) {
547 ret = dev->driver->load(dev, flags);
548 if (ret)
549 goto err_primary_node;
489 } 550 }
551
552 /* setup grouping for legacy outputs */
553 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
554 ret = drm_mode_group_init_legacy_group(dev,
555 &dev->primary->mode_group);
556 if (ret)
557 goto err_unload;
558 }
559
560 list_add_tail(&dev->driver_item, &dev->driver->device_list);
561
562 ret = 0;
563 goto out_unlock;
564
565err_unload:
566 if (dev->driver->unload)
567 dev->driver->unload(dev);
568err_primary_node:
569 drm_put_minor(dev->primary);
570err_render_node:
571 drm_put_minor(dev->render);
572err_control_node:
573 drm_put_minor(dev->control);
574err_agp:
575 if (dev->driver->bus->agp_destroy)
576 dev->driver->bus->agp_destroy(dev);
577out_unlock:
490 mutex_unlock(&drm_global_mutex); 578 mutex_unlock(&drm_global_mutex);
579 return ret;
491} 580}
492EXPORT_SYMBOL(drm_unplug_dev); 581EXPORT_SYMBOL(drm_dev_register);
582
583/**
584 * drm_dev_unregister - Unregister DRM device
585 * @dev: Device to unregister
586 *
587 * Unregister the DRM device from the system. This does the reverse of
588 * drm_dev_register() but does not deallocate the device. The caller must call
589 * drm_dev_free() to free all resources.
590 */
591void drm_dev_unregister(struct drm_device *dev)
592{
593 struct drm_map_list *r_list, *list_temp;
594
595 drm_lastclose(dev);
596
597 if (dev->driver->unload)
598 dev->driver->unload(dev);
599
600 if (dev->driver->bus->agp_destroy)
601 dev->driver->bus->agp_destroy(dev);
602
603 drm_vblank_cleanup(dev);
604
605 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
606 drm_rmmap(dev, r_list->map);
607
608 drm_unplug_minor(dev->control);
609 drm_unplug_minor(dev->render);
610 drm_unplug_minor(dev->primary);
611
612 list_del(&dev->driver_item);
613}
614EXPORT_SYMBOL(drm_dev_unregister);
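
Taken together, the new drm_stub.c entry points turn the old
drm_fill_in_dev()/drm_get_minor() sequence into a symmetric lifecycle:
drm_dev_alloc() initializes software state, drm_dev_register() performs
AGP init, minor registration, driver->load() and legacy mode-group setup,
and drm_dev_unregister()/drm_dev_free() reverse the two stages. A hedged
sketch of the pairing bus glue is expected to follow ("my_" names are
hypothetical):

    /* Sketch of the new device lifecycle, using only functions added
     * in this patch. */
    static int my_bus_attach(struct device *parent,
                             struct drm_driver *driver,
                             unsigned long flags)
    {
            struct drm_device *dev;
            int ret;

            dev = drm_dev_alloc(driver, parent);
            if (!dev)
                    return -ENOMEM;

            ret = drm_dev_register(dev, flags);
            if (ret) {
                    drm_dev_free(dev);   /* never plain kfree() */
                    return ret;
            }
            return 0;
    }

    static void my_bus_detach(struct drm_device *dev)
    {
            drm_put_dev(dev);   /* drm_dev_unregister() + drm_dev_free() */
    }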
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 2290b3b73832..1a35ea53106b 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -22,8 +22,8 @@
22#include <drm/drm_core.h> 22#include <drm/drm_core.h>
23#include <drm/drmP.h> 23#include <drm/drmP.h>
24 24
25#define to_drm_minor(d) container_of(d, struct drm_minor, kdev) 25#define to_drm_minor(d) dev_get_drvdata(d)
26#define to_drm_connector(d) container_of(d, struct drm_connector, kdev) 26#define to_drm_connector(d) dev_get_drvdata(d)
27 27
28static struct device_type drm_sysfs_device_minor = { 28static struct device_type drm_sysfs_device_minor = {
29 .name = "drm_minor" 29 .name = "drm_minor"
@@ -162,20 +162,6 @@ void drm_sysfs_destroy(void)
162 drm_class = NULL; 162 drm_class = NULL;
163} 163}
164 164
165/**
166 * drm_sysfs_device_release - do nothing
167 * @dev: Linux device
168 *
169 * Normally, this would free the DRM device associated with @dev, along
170 * with cleaning up any other stuff. But we do that in the DRM core, so
171 * this function can just return and hope that the core does its job.
172 */
173static void drm_sysfs_device_release(struct device *dev)
174{
175 memset(dev, 0, sizeof(struct device));
176 return;
177}
178
179/* 165/*
180 * Connector properties 166 * Connector properties
181 */ 167 */
@@ -380,11 +366,6 @@ static struct bin_attribute edid_attr = {
380 * properties (so far, connection status, dpms, mode list & edid) and 366 * properties (so far, connection status, dpms, mode list & edid) and
381 * generate a hotplug event so userspace knows there's a new connector 367 * generate a hotplug event so userspace knows there's a new connector
382 * available. 368 * available.
383 *
384 * Note:
385 * This routine should only be called *once* for each registered connector.
386 * A second call for an already registered connector will trigger the BUG_ON
387 * below.
388 */ 369 */
389int drm_sysfs_connector_add(struct drm_connector *connector) 370int drm_sysfs_connector_add(struct drm_connector *connector)
390{ 371{
@@ -394,29 +375,25 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
394 int i; 375 int i;
395 int ret; 376 int ret;
396 377
397 /* We shouldn't get called more than once for the same connector */ 378 if (connector->kdev)
398 BUG_ON(device_is_registered(&connector->kdev)); 379 return 0;
399
400 connector->kdev.parent = &dev->primary->kdev;
401 connector->kdev.class = drm_class;
402 connector->kdev.release = drm_sysfs_device_release;
403 380
381 connector->kdev = device_create(drm_class, dev->primary->kdev,
382 0, connector, "card%d-%s",
383 dev->primary->index, drm_get_connector_name(connector));
404 DRM_DEBUG("adding \"%s\" to sysfs\n", 384 DRM_DEBUG("adding \"%s\" to sysfs\n",
405 drm_get_connector_name(connector)); 385 drm_get_connector_name(connector));
406 386
407 dev_set_name(&connector->kdev, "card%d-%s", 387 if (IS_ERR(connector->kdev)) {
408 dev->primary->index, drm_get_connector_name(connector)); 388 DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
409 ret = device_register(&connector->kdev); 389 ret = PTR_ERR(connector->kdev);
410
411 if (ret) {
412 DRM_ERROR("failed to register connector device: %d\n", ret);
413 goto out; 390 goto out;
414 } 391 }
415 392
416 /* Standard attributes */ 393 /* Standard attributes */
417 394
418 for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) { 395 for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
419 ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]); 396 ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]);
420 if (ret) 397 if (ret)
421 goto err_out_files; 398 goto err_out_files;
422 } 399 }
@@ -433,7 +410,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
433 case DRM_MODE_CONNECTOR_Component: 410 case DRM_MODE_CONNECTOR_Component:
434 case DRM_MODE_CONNECTOR_TV: 411 case DRM_MODE_CONNECTOR_TV:
435 for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) { 412 for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
436 ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]); 413 ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]);
437 if (ret) 414 if (ret)
438 goto err_out_files; 415 goto err_out_files;
439 } 416 }
@@ -442,7 +419,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
442 break; 419 break;
443 } 420 }
444 421
445 ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr); 422 ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr);
446 if (ret) 423 if (ret)
447 goto err_out_files; 424 goto err_out_files;
448 425
@@ -453,10 +430,10 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
453 430
454err_out_files: 431err_out_files:
455 for (i = 0; i < opt_cnt; i++) 432 for (i = 0; i < opt_cnt; i++)
456 device_remove_file(&connector->kdev, &connector_attrs_opt1[i]); 433 device_remove_file(connector->kdev, &connector_attrs_opt1[i]);
457 for (i = 0; i < attr_cnt; i++) 434 for (i = 0; i < attr_cnt; i++)
458 device_remove_file(&connector->kdev, &connector_attrs[i]); 435 device_remove_file(connector->kdev, &connector_attrs[i]);
459 device_unregister(&connector->kdev); 436 device_unregister(connector->kdev);
460 437
461out: 438out:
462 return ret; 439 return ret;
@@ -480,16 +457,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
480{ 457{
481 int i; 458 int i;
482 459
483 if (!connector->kdev.parent) 460 if (!connector->kdev)
484 return; 461 return;
485 DRM_DEBUG("removing \"%s\" from sysfs\n", 462 DRM_DEBUG("removing \"%s\" from sysfs\n",
486 drm_get_connector_name(connector)); 463 drm_get_connector_name(connector));
487 464
488 for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) 465 for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
489 device_remove_file(&connector->kdev, &connector_attrs[i]); 466 device_remove_file(connector->kdev, &connector_attrs[i]);
490 sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr); 467 sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr);
491 device_unregister(&connector->kdev); 468 device_unregister(connector->kdev);
492 connector->kdev.parent = NULL; 469 connector->kdev = NULL;
493} 470}
494EXPORT_SYMBOL(drm_sysfs_connector_remove); 471EXPORT_SYMBOL(drm_sysfs_connector_remove);
495 472
@@ -508,7 +485,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
508 485
509 DRM_DEBUG("generating hotplug event\n"); 486 DRM_DEBUG("generating hotplug event\n");
510 487
511 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); 488 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
512} 489}
513EXPORT_SYMBOL(drm_sysfs_hotplug_event); 490EXPORT_SYMBOL(drm_sysfs_hotplug_event);
514 491
@@ -523,15 +500,8 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
523 */ 500 */
524int drm_sysfs_device_add(struct drm_minor *minor) 501int drm_sysfs_device_add(struct drm_minor *minor)
525{ 502{
526 int err;
527 char *minor_str; 503 char *minor_str;
528 504
529 minor->kdev.parent = minor->dev->dev;
530
531 minor->kdev.class = drm_class;
532 minor->kdev.release = drm_sysfs_device_release;
533 minor->kdev.devt = minor->device;
534 minor->kdev.type = &drm_sysfs_device_minor;
535 if (minor->type == DRM_MINOR_CONTROL) 505 if (minor->type == DRM_MINOR_CONTROL)
536 minor_str = "controlD%d"; 506 minor_str = "controlD%d";
537 else if (minor->type == DRM_MINOR_RENDER) 507 else if (minor->type == DRM_MINOR_RENDER)
@@ -539,18 +509,14 @@ int drm_sysfs_device_add(struct drm_minor *minor)
539 else 509 else
540 minor_str = "card%d"; 510 minor_str = "card%d";
541 511
542 dev_set_name(&minor->kdev, minor_str, minor->index); 512 minor->kdev = device_create(drm_class, minor->dev->dev,
543 513 MKDEV(DRM_MAJOR, minor->index),
544 err = device_register(&minor->kdev); 514 minor, minor_str, minor->index);
545 if (err) { 515 if (IS_ERR(minor->kdev)) {
546 DRM_ERROR("device add failed: %d\n", err); 516 DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
547 goto err_out; 517 return PTR_ERR(minor->kdev);
548 } 518 }
549
550 return 0; 519 return 0;
551
552err_out:
553 return err;
554} 520}
555 521
556/** 522/**
@@ -562,9 +528,9 @@ err_out:
562 */ 528 */
563void drm_sysfs_device_remove(struct drm_minor *minor) 529void drm_sysfs_device_remove(struct drm_minor *minor)
564{ 530{
565 if (minor->kdev.parent) 531 if (minor->kdev)
566 device_unregister(&minor->kdev); 532 device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));
567 minor->kdev.parent = NULL; 533 minor->kdev = NULL;
568} 534}
569 535
570 536
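
The drm_sysfs.c conversion swaps the struct device embedded in drm_minor
and drm_connector for a pointer obtained from device_create(), which also
stores the owning object as drvdata; that is why to_drm_minor() and
to_drm_connector() become dev_get_drvdata() at the top of the file, and
why drm_sysfs_device_release() can go away (the class core now owns the
allocation). A hedged sketch of the callback side of this pattern
(attribute name and body are illustrative):

    /* Sketch: with device_create(..., drvdata, ...), sysfs callbacks
     * recover the owner via dev_get_drvdata() instead of container_of(). */
    static ssize_t my_show(struct device *device,
                           struct device_attribute *attr, char *buf)
    {
            struct drm_connector *connector = dev_get_drvdata(device);

            return snprintf(buf, PAGE_SIZE, "%s\n",
                            drm_get_connector_name(connector));
    }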
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 87664723b9ce..b179b70e7853 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -7,57 +7,20 @@ int drm_get_usb_dev(struct usb_interface *interface,
7 struct drm_driver *driver) 7 struct drm_driver *driver)
8{ 8{
9 struct drm_device *dev; 9 struct drm_device *dev;
10 struct usb_device *usbdev;
11 int ret; 10 int ret;
12 11
13 DRM_DEBUG("\n"); 12 DRM_DEBUG("\n");
14 13
15 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 14 dev = drm_dev_alloc(driver, &interface->dev);
16 if (!dev) 15 if (!dev)
17 return -ENOMEM; 16 return -ENOMEM;
18 17
19 usbdev = interface_to_usbdev(interface); 18 dev->usbdev = interface_to_usbdev(interface);
20 dev->usbdev = usbdev;
21 dev->dev = &interface->dev;
22
23 mutex_lock(&drm_global_mutex);
24
25 ret = drm_fill_in_dev(dev, NULL, driver);
26 if (ret) {
27 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
28 goto err_g1;
29 }
30
31 usb_set_intfdata(interface, dev); 19 usb_set_intfdata(interface, dev);
32 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
33 if (ret)
34 goto err_g1;
35
36 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
37 ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
38 if (ret)
39 goto err_g11;
40 }
41 20
42 ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); 21 ret = drm_dev_register(dev, 0);
43 if (ret) 22 if (ret)
44 goto err_g2; 23 goto err_free;
45
46 if (dev->driver->load) {
47 ret = dev->driver->load(dev, 0);
48 if (ret)
49 goto err_g3;
50 }
51
52 /* setup the grouping for the legacy output */
53 ret = drm_mode_group_init_legacy_group(dev,
54 &dev->primary->mode_group);
55 if (ret)
56 goto err_g3;
57
58 list_add_tail(&dev->driver_item, &driver->device_list);
59
60 mutex_unlock(&drm_global_mutex);
61 24
62 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 25 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
63 driver->name, driver->major, driver->minor, driver->patchlevel, 26 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -65,16 +28,8 @@ int drm_get_usb_dev(struct usb_interface *interface,
65 28
66 return 0; 29 return 0;
67 30
68err_g3: 31err_free:
69 drm_put_minor(&dev->primary); 32 drm_dev_free(dev);
70err_g2:
71 if (dev->render)
72 drm_put_minor(&dev->render);
73err_g11:
74 drm_put_minor(&dev->control);
75err_g1:
76 kfree(dev);
77 mutex_unlock(&drm_global_mutex);
78 return ret; 33 return ret;
79 34
80} 35}
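
For hot-unpluggable USB hardware, the teardown side pairs with
drm_unplug_dev() from drm_stub.c above, which marks the device unplugged
and defers the final drm_put_dev() until the last open file is closed. A
hedged sketch of a disconnect handler built on it:

    /* Sketch: disconnect must not free the drm_device while user space
     * still holds it open; drm_unplug_dev() handles the deferral. */
    static void my_usb_disconnect(struct usb_interface *interface)
    {
            struct drm_device *dev = usb_get_intfdata(interface);

            drm_unplug_dev(dev);
    }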
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index b5c5af7328df..93e95d7efd57 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -301,7 +301,7 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
301 301
302 offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ 302 offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
303 page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */ 303 page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
304 page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK)))); 304 page = virt_to_page((void *)dma->pagelist[page_nr]);
305 305
306 get_page(page); 306 get_page(page);
307 vmf->page = page; 307 vmf->page = page;
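
The drm_vm.c change is behavior-preserving: assuming the pagelist entries
are page-aligned kernel addresses, adding the sub-page part of the fault
offset never changed which struct page virt_to_page() returned, and the
new (void *) cast matches the prototype on architectures where
virt_to_page() takes a pointer. A tiny hedged illustration:

    /* Sketch: a sub-page offset cannot change the page translation,
     * so "+ (offset & ~PAGE_MASK)" was dead weight. */
    static struct page *page_of(unsigned long page_aligned_vaddr,
                                unsigned long offset)
    {
            /* identical result with or without the low offset bits */
            return virt_to_page((void *)(page_aligned_vaddr +
                                         (offset & ~PAGE_MASK)));
    }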
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 45b6ef595965..f227f544aa36 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,6 +2,7 @@ config DRM_EXYNOS
2 tristate "DRM Support for Samsung SoC EXYNOS Series" 2 tristate "DRM Support for Samsung SoC EXYNOS Series"
3 depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM) 3 depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER
5 select FB_CFB_FILLRECT 6 select FB_CFB_FILLRECT
6 select FB_CFB_COPYAREA 7 select FB_CFB_COPYAREA
7 select FB_CFB_IMAGEBLIT 8 select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 81192d00b39e..b676006a95a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -264,7 +264,6 @@ static struct drm_driver exynos_drm_driver = {
264 .get_vblank_counter = drm_vblank_count, 264 .get_vblank_counter = drm_vblank_count,
265 .enable_vblank = exynos_drm_crtc_enable_vblank, 265 .enable_vblank = exynos_drm_crtc_enable_vblank,
266 .disable_vblank = exynos_drm_crtc_disable_vblank, 266 .disable_vblank = exynos_drm_crtc_disable_vblank,
267 .gem_init_object = exynos_drm_gem_init_object,
268 .gem_free_object = exynos_drm_gem_free_object, 267 .gem_free_object = exynos_drm_gem_free_object,
269 .gem_vm_ops = &exynos_drm_gem_vm_ops, 268 .gem_vm_ops = &exynos_drm_gem_vm_ops,
270 .dumb_create = exynos_drm_gem_dumb_create, 269 .dumb_create = exynos_drm_gem_dumb_create,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 868a14d52995..23da72b5eae9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -716,20 +716,20 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
716{ 716{
717 /* 717 /*
718 * enable drm irq mode. 718 * enable drm irq mode.
719 * - with irq_enabled = 1, we can use the vblank feature. 719 * - with irq_enabled = true, we can use the vblank feature.
720 * 720 *
721 * P.S. note that we wouldn't use drm irq handler but 721 * P.S. note that we wouldn't use drm irq handler but
722 * just specific driver own one instead because 722 * just specific driver own one instead because
723 * drm framework supports only one irq handler. 723 * drm framework supports only one irq handler.
724 */ 724 */
725 drm_dev->irq_enabled = 1; 725 drm_dev->irq_enabled = true;
726 726
727 /* 727 /*
728 * with vblank_disable_allowed = 1, vblank interrupt will be disabled 728 * with vblank_disable_allowed = true, vblank interrupt will be disabled
729 * by drm timer once a current process gives up ownership of 729 * by drm timer once a current process gives up ownership of
730 * vblank event.(after drm_vblank_put function is called) 730 * vblank event.(after drm_vblank_put function is called)
731 */ 731 */
732 drm_dev->vblank_disable_allowed = 1; 732 drm_dev->vblank_disable_allowed = true;
733 733
734 /* attach this sub driver to iommu mapping if supported. */ 734 /* attach this sub driver to iommu mapping if supported. */
735 if (is_drm_iommu_supported(drm_dev)) 735 if (is_drm_iommu_supported(drm_dev))
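
The comments this hunk converts describe the vblank reference model: once
vblank_disable_allowed is set, the core disables the interrupt from a
timer shortly after the last reference is dropped with drm_vblank_put().
A hedged sketch of that reference pattern as seen from a driver:

    /* Sketch: hold a vblank reference only across the wait; the core
     * turns the IRQ off some time after the final drm_vblank_put(). */
    static int my_wait_one_vblank(struct drm_device *dev, int pipe)
    {
            int ret = drm_vblank_get(dev, pipe);

            if (ret)
                    return ret;
            /* ... arm the flip and wait for the vblank event ... */
            drm_vblank_put(dev, pipe);
            return 0;
    }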
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 49f9cd232757..1ade191d84f4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -630,11 +630,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
630 dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); 630 dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
631} 631}
632 632
633int exynos_drm_gem_init_object(struct drm_gem_object *obj)
634{
635 return 0;
636}
637
638void exynos_drm_gem_free_object(struct drm_gem_object *obj) 633void exynos_drm_gem_free_object(struct drm_gem_object *obj)
639{ 634{
640 struct exynos_drm_gem_obj *exynos_gem_obj; 635 struct exynos_drm_gem_obj *exynos_gem_obj;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 09555afdfe9c..702ec3abe85c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -135,9 +135,6 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
135 unsigned int gem_handle, 135 unsigned int gem_handle,
136 struct drm_file *file_priv); 136 struct drm_file *file_priv);
137 137
138/* initialize gem object. */
139int exynos_drm_gem_init_object(struct drm_gem_object *obj);
140
141/* free gem object. */ 138/* free gem object. */
142void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj); 139void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
143 140
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 4400330e4449..ddaaedde173d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -101,7 +101,6 @@ static struct edid *vidi_get_edid(struct device *dev,
101{ 101{
102 struct vidi_context *ctx = get_vidi_context(dev); 102 struct vidi_context *ctx = get_vidi_context(dev);
103 struct edid *edid; 103 struct edid *edid;
104 int edid_len;
105 104
106 /* 105 /*
107 * the edid data comes from user side and it would be set 106 * the edid data comes from user side and it would be set
@@ -112,8 +111,7 @@ static struct edid *vidi_get_edid(struct device *dev,
112 return ERR_PTR(-EFAULT); 111 return ERR_PTR(-EFAULT);
113 } 112 }
114 113
115 edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; 114 edid = drm_edid_duplicate(ctx->raw_edid);
116 edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
117 if (!edid) { 115 if (!edid) {
118 DRM_DEBUG_KMS("failed to allocate edid\n"); 116 DRM_DEBUG_KMS("failed to allocate edid\n");
119 return ERR_PTR(-ENOMEM); 117 return ERR_PTR(-ENOMEM);
@@ -385,20 +383,20 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
385{ 383{
386 /* 384 /*
387 * enable drm irq mode. 385 * enable drm irq mode.
388 * - with irq_enabled = 1, we can use the vblank feature. 386 * - with irq_enabled = true, we can use the vblank feature.
389 * 387 *
390 * P.S. note that we wouldn't use drm irq handler but 388 * P.S. note that we wouldn't use drm irq handler but
391 * just specific driver own one instead because 389 * just specific driver own one instead because
392 * drm framework supports only one irq handler. 390 * drm framework supports only one irq handler.
393 */ 391 */
394 drm_dev->irq_enabled = 1; 392 drm_dev->irq_enabled = true;
395 393
396 /* 394 /*
397 * with vblank_disable_allowed = 1, vblank interrupt will be disabled 395 * with vblank_disable_allowed = true, vblank interrupt will be disabled
398 * by drm timer once a current process gives up ownership of 396 * by drm timer once a current process gives up ownership of
399 * vblank event.(after drm_vblank_put function is called) 397 * vblank event.(after drm_vblank_put function is called)
400 */ 398 */
401 drm_dev->vblank_disable_allowed = 1; 399 drm_dev->vblank_disable_allowed = true;
402 400
403 return 0; 401 return 0;
404} 402}
@@ -485,7 +483,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
485 struct exynos_drm_manager *manager; 483 struct exynos_drm_manager *manager;
486 struct exynos_drm_display_ops *display_ops; 484 struct exynos_drm_display_ops *display_ops;
487 struct drm_exynos_vidi_connection *vidi = data; 485 struct drm_exynos_vidi_connection *vidi = data;
488 int edid_len;
489 486
490 if (!vidi) { 487 if (!vidi) {
491 DRM_DEBUG_KMS("user data for vidi is null.\n"); 488 DRM_DEBUG_KMS("user data for vidi is null.\n");
@@ -524,8 +521,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
524 DRM_DEBUG_KMS("edid data is invalid.\n"); 521 DRM_DEBUG_KMS("edid data is invalid.\n");
525 return -EINVAL; 522 return -EINVAL;
526 } 523 }
527 edid_len = (1 + raw_edid->extensions) * EDID_LENGTH; 524 ctx->raw_edid = drm_edid_duplicate(raw_edid);
528 ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
529 if (!ctx->raw_edid) { 525 if (!ctx->raw_edid) {
530 DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); 526 DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
531 return -ENOMEM; 527 return -ENOMEM;
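
Both exynos hunks replace an open-coded kmemdup() of the EDID with the
new drm_edid_duplicate() helper. Inferred directly from the code it
replaces, the helper amounts to this sketch:

    /* Sketch of drm_edid_duplicate(), reconstructed from the removed
     * call sites: copy the base block plus all extension blocks. */
    struct edid *drm_edid_duplicate(const struct edid *edid)
    {
            return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH,
                           GFP_KERNEL);
    }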
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 1f6e2dfaaeae..508cf99a292d 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -5,6 +5,7 @@ config DRM_GMA500
5 select FB_CFB_FILLRECT 5 select FB_CFB_FILLRECT
6 select FB_CFB_IMAGEBLIT 6 select FB_CFB_IMAGEBLIT
7 select DRM_KMS_HELPER 7 select DRM_KMS_HELPER
8 select DRM_KMS_FB_HELPER
8 select DRM_TTM 9 select DRM_TTM
9 # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915 10 # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
10 select ACPI_VIDEO if ACPI 11 select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 162f686c532d..5a9a6a3063a8 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -634,6 +634,7 @@ const struct psb_ops cdv_chip_ops = {
634 .crtcs = 2, 634 .crtcs = 2,
635 .hdmi_mask = (1 << 0) | (1 << 1), 635 .hdmi_mask = (1 << 0) | (1 << 1),
636 .lvds_mask = (1 << 1), 636 .lvds_mask = (1 << 1),
637 .sdvo_mask = (1 << 0),
637 .cursor_needs_phys = 0, 638 .cursor_needs_phys = 0,
638 .sgx_offset = MRST_SGX_OFFSET, 639 .sgx_offset = MRST_SGX_OFFSET,
639 .chip_setup = cdv_chip_setup, 640 .chip_setup = cdv_chip_setup,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index f4eb43573cad..f88a1815d87c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -666,7 +666,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
666 strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 666 strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
667 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 667 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
668 intel_dp->adapter.algo_data = &intel_dp->algo; 668 intel_dp->adapter.algo_data = &intel_dp->algo;
669 intel_dp->adapter.dev.parent = &connector->base.kdev; 669 intel_dp->adapter.dev.parent = connector->base.kdev;
670 670
671 if (is_edp(encoder)) 671 if (is_edp(encoder))
672 cdv_intel_edp_panel_vdd_on(encoder); 672 cdv_intel_edp_panel_vdd_on(encoder);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 01dd7d225762..94b3fec22c28 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -714,7 +714,7 @@ static void psb_setup_outputs(struct drm_device *dev)
714 clone_mask = (1 << INTEL_OUTPUT_ANALOG); 714 clone_mask = (1 << INTEL_OUTPUT_ANALOG);
715 break; 715 break;
716 case INTEL_OUTPUT_SDVO: 716 case INTEL_OUTPUT_SDVO:
717 crtc_mask = ((1 << 0) | (1 << 1)); 717 crtc_mask = dev_priv->ops->sdvo_mask;
718 clone_mask = (1 << INTEL_OUTPUT_SDVO); 718 clone_mask = (1 << INTEL_OUTPUT_SDVO);
719 break; 719 break;
720 case INTEL_OUTPUT_LVDS: 720 case INTEL_OUTPUT_LVDS:
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 10ae8c52d06f..e2db48a81ed0 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -29,11 +29,6 @@
29#include <drm/drm_vma_manager.h> 29#include <drm/drm_vma_manager.h>
30#include "psb_drv.h" 30#include "psb_drv.h"
31 31
32int psb_gem_init_object(struct drm_gem_object *obj)
33{
34 return -EINVAL;
35}
36
37void psb_gem_free_object(struct drm_gem_object *obj) 32void psb_gem_free_object(struct drm_gem_object *obj)
38{ 33{
39 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); 34 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index 62cd42e88f28..566d330aaeea 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -51,6 +51,9 @@
51#define wait_for(COND, MS) _wait_for(COND, MS, 1) 51#define wait_for(COND, MS) _wait_for(COND, MS, 1)
52#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) 52#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
53 53
54#define GMBUS_REG_READ(reg) ioread32(dev_priv->gmbus_reg + (reg))
55#define GMBUS_REG_WRITE(reg, val) iowrite32((val), dev_priv->gmbus_reg + (reg))
56
54/* Intel GPIO access functions */ 57/* Intel GPIO access functions */
55 58
56#define I2C_RISEFALL_TIME 20 59#define I2C_RISEFALL_TIME 20
@@ -71,7 +74,8 @@ struct intel_gpio {
71void 74void
72gma_intel_i2c_reset(struct drm_device *dev) 75gma_intel_i2c_reset(struct drm_device *dev)
73{ 76{
74 REG_WRITE(GMBUS0, 0); 77 struct drm_psb_private *dev_priv = dev->dev_private;
78 GMBUS_REG_WRITE(GMBUS0, 0);
75} 79}
76 80
77static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable) 81static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
@@ -98,11 +102,10 @@ static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
98static u32 get_reserved(struct intel_gpio *gpio) 102static u32 get_reserved(struct intel_gpio *gpio)
99{ 103{
100 struct drm_psb_private *dev_priv = gpio->dev_priv; 104 struct drm_psb_private *dev_priv = gpio->dev_priv;
101 struct drm_device *dev = dev_priv->dev;
102 u32 reserved = 0; 105 u32 reserved = 0;
103 106
104 /* On most chips, these bits must be preserved in software. */ 107 /* On most chips, these bits must be preserved in software. */
105 reserved = REG_READ(gpio->reg) & 108 reserved = GMBUS_REG_READ(gpio->reg) &
106 (GPIO_DATA_PULLUP_DISABLE | 109 (GPIO_DATA_PULLUP_DISABLE |
107 GPIO_CLOCK_PULLUP_DISABLE); 110 GPIO_CLOCK_PULLUP_DISABLE);
108 111
@@ -113,29 +116,26 @@ static int get_clock(void *data)
113{ 116{
114 struct intel_gpio *gpio = data; 117 struct intel_gpio *gpio = data;
115 struct drm_psb_private *dev_priv = gpio->dev_priv; 118 struct drm_psb_private *dev_priv = gpio->dev_priv;
116 struct drm_device *dev = dev_priv->dev;
117 u32 reserved = get_reserved(gpio); 119 u32 reserved = get_reserved(gpio);
118 REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK); 120 GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
119 REG_WRITE(gpio->reg, reserved); 121 GMBUS_REG_WRITE(gpio->reg, reserved);
120 return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; 122 return (GMBUS_REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
121} 123}
122 124
123static int get_data(void *data) 125static int get_data(void *data)
124{ 126{
125 struct intel_gpio *gpio = data; 127 struct intel_gpio *gpio = data;
126 struct drm_psb_private *dev_priv = gpio->dev_priv; 128 struct drm_psb_private *dev_priv = gpio->dev_priv;
127 struct drm_device *dev = dev_priv->dev;
128 u32 reserved = get_reserved(gpio); 129 u32 reserved = get_reserved(gpio);
129 REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK); 130 GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
130 REG_WRITE(gpio->reg, reserved); 131 GMBUS_REG_WRITE(gpio->reg, reserved);
131 return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0; 132 return (GMBUS_REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
132} 133}
133 134
134static void set_clock(void *data, int state_high) 135static void set_clock(void *data, int state_high)
135{ 136{
136 struct intel_gpio *gpio = data; 137 struct intel_gpio *gpio = data;
137 struct drm_psb_private *dev_priv = gpio->dev_priv; 138 struct drm_psb_private *dev_priv = gpio->dev_priv;
138 struct drm_device *dev = dev_priv->dev;
139 u32 reserved = get_reserved(gpio); 139 u32 reserved = get_reserved(gpio);
140 u32 clock_bits; 140 u32 clock_bits;
141 141
@@ -145,15 +145,14 @@ static void set_clock(void *data, int state_high)
145 clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | 145 clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
146 GPIO_CLOCK_VAL_MASK; 146 GPIO_CLOCK_VAL_MASK;
147 147
148 REG_WRITE(gpio->reg, reserved | clock_bits); 148 GMBUS_REG_WRITE(gpio->reg, reserved | clock_bits);
149 REG_READ(gpio->reg); /* Posting */ 149 GMBUS_REG_READ(gpio->reg); /* Posting */
150} 150}
151 151
152static void set_data(void *data, int state_high) 152static void set_data(void *data, int state_high)
153{ 153{
154 struct intel_gpio *gpio = data; 154 struct intel_gpio *gpio = data;
155 struct drm_psb_private *dev_priv = gpio->dev_priv; 155 struct drm_psb_private *dev_priv = gpio->dev_priv;
156 struct drm_device *dev = dev_priv->dev;
157 u32 reserved = get_reserved(gpio); 156 u32 reserved = get_reserved(gpio);
158 u32 data_bits; 157 u32 data_bits;
159 158
@@ -163,8 +162,8 @@ static void set_data(void *data, int state_high)
163 data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | 162 data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
164 GPIO_DATA_VAL_MASK; 163 GPIO_DATA_VAL_MASK;
165 164
166 REG_WRITE(gpio->reg, reserved | data_bits); 165 GMBUS_REG_WRITE(gpio->reg, reserved | data_bits);
167 REG_READ(gpio->reg); 166 GMBUS_REG_READ(gpio->reg);
168} 167}
169 168
170static struct i2c_adapter * 169static struct i2c_adapter *
@@ -251,7 +250,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
251 struct intel_gmbus, 250 struct intel_gmbus,
252 adapter); 251 adapter);
253 struct drm_psb_private *dev_priv = adapter->algo_data; 252 struct drm_psb_private *dev_priv = adapter->algo_data;
254 struct drm_device *dev = dev_priv->dev;
255 int i, reg_offset; 253 int i, reg_offset;
256 254
257 if (bus->force_bit) 255 if (bus->force_bit)
@@ -260,28 +258,30 @@ gmbus_xfer(struct i2c_adapter *adapter,
260 258
261 reg_offset = 0; 259 reg_offset = 0;
262 260
263 REG_WRITE(GMBUS0 + reg_offset, bus->reg0); 261 GMBUS_REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
264 262
265 for (i = 0; i < num; i++) { 263 for (i = 0; i < num; i++) {
266 u16 len = msgs[i].len; 264 u16 len = msgs[i].len;
267 u8 *buf = msgs[i].buf; 265 u8 *buf = msgs[i].buf;
268 266
269 if (msgs[i].flags & I2C_M_RD) { 267 if (msgs[i].flags & I2C_M_RD) {
270 REG_WRITE(GMBUS1 + reg_offset, 268 GMBUS_REG_WRITE(GMBUS1 + reg_offset,
271 GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) | 269 GMBUS_CYCLE_WAIT |
272 (len << GMBUS_BYTE_COUNT_SHIFT) | 270 (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
273 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | 271 (len << GMBUS_BYTE_COUNT_SHIFT) |
274 GMBUS_SLAVE_READ | GMBUS_SW_RDY); 272 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
275 REG_READ(GMBUS2+reg_offset); 273 GMBUS_SLAVE_READ | GMBUS_SW_RDY);
274 GMBUS_REG_READ(GMBUS2+reg_offset);
276 do { 275 do {
277 u32 val, loop = 0; 276 u32 val, loop = 0;
278 277
279 if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) 278 if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
279 (GMBUS_SATOER | GMBUS_HW_RDY), 50))
280 goto timeout; 280 goto timeout;
281 if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) 281 if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
282 goto clear_err; 282 goto clear_err;
283 283
284 val = REG_READ(GMBUS3 + reg_offset); 284 val = GMBUS_REG_READ(GMBUS3 + reg_offset);
285 do { 285 do {
286 *buf++ = val & 0xff; 286 *buf++ = val & 0xff;
287 val >>= 8; 287 val >>= 8;
@@ -295,18 +295,20 @@ gmbus_xfer(struct i2c_adapter *adapter,
295 val |= *buf++ << (8 * loop); 295 val |= *buf++ << (8 * loop);
296 } while (--len && ++loop < 4); 296 } while (--len && ++loop < 4);
297 297
298 REG_WRITE(GMBUS3 + reg_offset, val); 298 GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
299 REG_WRITE(GMBUS1 + reg_offset, 299 GMBUS_REG_WRITE(GMBUS1 + reg_offset,
300 (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) | 300 (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
301 (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) | 301 (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
302 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | 302 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
303 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); 303 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
304 REG_READ(GMBUS2+reg_offset); 304 GMBUS_REG_READ(GMBUS2+reg_offset);
305 305
306 while (len) { 306 while (len) {
307 if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) 307 if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
308 (GMBUS_SATOER | GMBUS_HW_RDY), 50))
308 goto timeout; 309 goto timeout;
309 if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) 310 if (GMBUS_REG_READ(GMBUS2 + reg_offset) &
311 GMBUS_SATOER)
310 goto clear_err; 312 goto clear_err;
311 313
312 val = loop = 0; 314 val = loop = 0;
@@ -314,14 +316,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
314 val |= *buf++ << (8 * loop); 316 val |= *buf++ << (8 * loop);
315 } while (--len && ++loop < 4); 317 } while (--len && ++loop < 4);
316 318
317 REG_WRITE(GMBUS3 + reg_offset, val); 319 GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
318 REG_READ(GMBUS2+reg_offset); 320 GMBUS_REG_READ(GMBUS2+reg_offset);
319 } 321 }
320 } 322 }
321 323
322 if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50)) 324 if (i + 1 < num && wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
323 goto timeout; 325 goto timeout;
324 if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) 326 if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
325 goto clear_err; 327 goto clear_err;
326 } 328 }
327 329
@@ -332,20 +334,20 @@ clear_err:
332 * of resetting the GMBUS controller and so clearing the 334 * of resetting the GMBUS controller and so clearing the
333 * BUS_ERROR raised by the slave's NAK. 335 * BUS_ERROR raised by the slave's NAK.
334 */ 336 */
335 REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT); 337 GMBUS_REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
336 REG_WRITE(GMBUS1 + reg_offset, 0); 338 GMBUS_REG_WRITE(GMBUS1 + reg_offset, 0);
337 339
338done: 340done:
339 /* Mark the GMBUS interface as disabled. We will re-enable it at the 341 /* Mark the GMBUS interface as disabled. We will re-enable it at the
340 * start of the next xfer, till then let it sleep. 342 * start of the next xfer, till then let it sleep.
341 */ 343 */
342 REG_WRITE(GMBUS0 + reg_offset, 0); 344 GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
343 return i; 345 return i;
344 346
345timeout: 347timeout:
346 DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n", 348 DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
347 bus->reg0 & 0xff, bus->adapter.name); 349 bus->reg0 & 0xff, bus->adapter.name);
348 REG_WRITE(GMBUS0 + reg_offset, 0); 350 GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
349 351
350 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ 352 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
351 bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff); 353 bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
@@ -399,6 +401,11 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
399 if (dev_priv->gmbus == NULL) 401 if (dev_priv->gmbus == NULL)
400 return -ENOMEM; 402 return -ENOMEM;
401 403
404 if (IS_MRST(dev))
405 dev_priv->gmbus_reg = dev_priv->aux_reg;
406 else
407 dev_priv->gmbus_reg = dev_priv->vdc_reg;
408
402 for (i = 0; i < GMBUS_NUM_PORTS; i++) { 409 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
403 struct intel_gmbus *bus = &dev_priv->gmbus[i]; 410 struct intel_gmbus *bus = &dev_priv->gmbus[i];
404 411
@@ -487,6 +494,7 @@ void gma_intel_teardown_gmbus(struct drm_device *dev)
487 i2c_del_adapter(&bus->adapter); 494 i2c_del_adapter(&bus->adapter);
488 } 495 }
489 496
497 dev_priv->gmbus_reg = NULL; /* iounmap is done in driver_unload */
490 kfree(dev_priv->gmbus); 498 kfree(dev_priv->gmbus);
491 dev_priv->gmbus = NULL; 499 dev_priv->gmbus = NULL;
492} 500}
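
The gmbus rework exists so the register block can sit at a different MMIO
base per chip: gma_intel_setup_gmbus() points dev_priv->gmbus_reg at
aux_reg on Moorestown and at vdc_reg elsewhere, and every access goes
through the new GMBUS_REG_READ/WRITE macros instead of the fixed-base
REG_READ()/REG_WRITE(). The same pattern restated as a hedged sketch with
inline helpers:

    /* Sketch: one accessor pair keyed off a per-device __iomem base,
     * equivalent to the GMBUS_REG_* macros added above. */
    static inline u32 gmbus_read(struct drm_psb_private *dev_priv, u32 reg)
    {
            return ioread32(dev_priv->gmbus_reg + reg);
    }

    static inline void gmbus_write(struct drm_psb_private *dev_priv,
                                   u32 reg, u32 val)
    {
            iowrite32(val, dev_priv->gmbus_reg + reg);
    }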
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 54c98962b73e..8195e8592107 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -26,24 +26,10 @@
26#include "gma_display.h" 26#include "gma_display.h"
27#include "power.h" 27#include "power.h"
28 28
29struct psb_intel_range_t { 29#define MRST_LIMIT_LVDS_100L 0
30 int min, max; 30#define MRST_LIMIT_LVDS_83 1
31}; 31#define MRST_LIMIT_LVDS_100 2
32 32#define MRST_LIMIT_SDVO 3
33struct oaktrail_limit_t {
34 struct psb_intel_range_t dot, m, p1;
35};
36
37struct oaktrail_clock_t {
38 /* derived values */
39 int dot;
40 int m;
41 int p1;
42};
43
44#define MRST_LIMIT_LVDS_100L 0
45#define MRST_LIMIT_LVDS_83 1
46#define MRST_LIMIT_LVDS_100 2
47 33
48#define MRST_DOT_MIN 19750 34#define MRST_DOT_MIN 19750
49#define MRST_DOT_MAX 120000 35#define MRST_DOT_MAX 120000
@@ -57,21 +43,40 @@ struct oaktrail_clock_t {
57#define MRST_P1_MAX_0 7 43#define MRST_P1_MAX_0 7
58#define MRST_P1_MAX_1 8 44#define MRST_P1_MAX_1 8
59 45
60static const struct oaktrail_limit_t oaktrail_limits[] = { 46static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
47 struct drm_crtc *crtc, int target,
48 int refclk, struct gma_clock_t *best_clock);
49
50static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
51 struct drm_crtc *crtc, int target,
52 int refclk, struct gma_clock_t *best_clock);
53
54static const struct gma_limit_t mrst_limits[] = {
61 { /* MRST_LIMIT_LVDS_100L */ 55 { /* MRST_LIMIT_LVDS_100L */
62 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX}, 56 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
63 .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L}, 57 .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
64 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1}, 58 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
59 .find_pll = mrst_lvds_find_best_pll,
65 }, 60 },
66 { /* MRST_LIMIT_LVDS_83L */ 61 { /* MRST_LIMIT_LVDS_83L */
67 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX}, 62 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
68 .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83}, 63 .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
69 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0}, 64 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
65 .find_pll = mrst_lvds_find_best_pll,
70 }, 66 },
71 { /* MRST_LIMIT_LVDS_100 */ 67 { /* MRST_LIMIT_LVDS_100 */
72 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX}, 68 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
73 .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100}, 69 .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
74 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1}, 70 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
71 .find_pll = mrst_lvds_find_best_pll,
72 },
73 { /* MRST_LIMIT_SDVO */
74 .vco = {.min = 1400000, .max = 2800000},
75 .n = {.min = 3, .max = 7},
76 .m = {.min = 80, .max = 137},
77 .p1 = {.min = 1, .max = 2},
78 .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 10},
79 .find_pll = mrst_sdvo_find_best_pll,
75 }, 80 },
76}; 81};
77 82
@@ -82,9 +87,10 @@ static const u32 oaktrail_m_converts[] = {
82 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c, 87 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
83}; 88};
84 89
85static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc) 90static const struct gma_limit_t *mrst_limit(struct drm_crtc *crtc,
91 int refclk)
86{ 92{
87 const struct oaktrail_limit_t *limit = NULL; 93 const struct gma_limit_t *limit = NULL;
88 struct drm_device *dev = crtc->dev; 94 struct drm_device *dev = crtc->dev;
89 struct drm_psb_private *dev_priv = dev->dev_private; 95 struct drm_psb_private *dev_priv = dev->dev_private;
90 96
@@ -92,45 +98,100 @@ static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
92 || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) { 98 || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
93 switch (dev_priv->core_freq) { 99 switch (dev_priv->core_freq) {
94 case 100: 100 case 100:
95 limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L]; 101 limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
96 break; 102 break;
97 case 166: 103 case 166:
98 limit = &oaktrail_limits[MRST_LIMIT_LVDS_83]; 104 limit = &mrst_limits[MRST_LIMIT_LVDS_83];
99 break; 105 break;
100 case 200: 106 case 200:
101 limit = &oaktrail_limits[MRST_LIMIT_LVDS_100]; 107 limit = &mrst_limits[MRST_LIMIT_LVDS_100];
102 break; 108 break;
103 } 109 }
110 } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
111 limit = &mrst_limits[MRST_LIMIT_SDVO];
104 } else { 112 } else {
105 limit = NULL; 113 limit = NULL;
106 dev_err(dev->dev, "oaktrail_limit Wrong display type.\n"); 114 dev_err(dev->dev, "mrst_limit Wrong display type.\n");
107 } 115 }
108 116
109 return limit; 117 return limit;
110} 118}
111 119
112/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ 120/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
113static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock) 121static void mrst_lvds_clock(int refclk, struct gma_clock_t *clock)
114{ 122{
115 clock->dot = (refclk * clock->m) / (14 * clock->p1); 123 clock->dot = (refclk * clock->m) / (14 * clock->p1);
116} 124}
117 125
118static void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock) 126static void mrst_print_pll(struct gma_clock_t *clock)
119{ 127{
120 pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n", 128 DRM_DEBUG_DRIVER("dotclock=%d, m=%d, m1=%d, m2=%d, n=%d, p1=%d, p2=%d\n",
121 prefix, clock->dot, clock->m, clock->p1); 129 clock->dot, clock->m, clock->m1, clock->m2, clock->n,
130 clock->p1, clock->p2);
131}
132
133static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
134 struct drm_crtc *crtc, int target,
135 int refclk, struct gma_clock_t *best_clock)
136{
137 struct gma_clock_t clock;
138 u32 target_vco, actual_freq;
139 s32 freq_error, min_error = 100000;
140
141 memset(best_clock, 0, sizeof(*best_clock));
142
143 for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
144 for (clock.n = limit->n.min; clock.n <= limit->n.max;
145 clock.n++) {
146 for (clock.p1 = limit->p1.min;
147 clock.p1 <= limit->p1.max; clock.p1++) {
148 /* p2 value always stored in p2_slow on SDVO */
149 clock.p = clock.p1 * limit->p2.p2_slow;
150 target_vco = target * clock.p;
151
152 /* VCO will increase at this point so break */
153 if (target_vco > limit->vco.max)
154 break;
155
156 if (target_vco < limit->vco.min)
157 continue;
158
159 actual_freq = (refclk * clock.m) /
160 (clock.n * clock.p);
161 freq_error = 10000 -
162 ((target * 10000) / actual_freq);
163
164 if (freq_error < -min_error) {
165 /* freq_error will start to decrease at
166 this point so break */
167 break;
168 }
169
170 if (freq_error < 0)
171 freq_error = -freq_error;
172
173 if (freq_error < min_error) {
174 min_error = freq_error;
175 *best_clock = clock;
176 }
177 }
178 }
179 if (min_error == 0)
180 break;
181 }
182
183 return min_error == 0;
122} 184}
123 185
124/** 186/**
125 * Returns a set of divisors for the desired target clock with the given refclk, 187 * Returns a set of divisors for the desired target clock with the given refclk,
126 * or FALSE. Divisor values are the actual divisors for 188 * or FALSE. Divisor values are the actual divisors for
127 */ 189 */
128static bool 190static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
129mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk, 191 struct drm_crtc *crtc, int target,
130 struct oaktrail_clock_t *best_clock) 192 int refclk, struct gma_clock_t *best_clock)
131{ 193{
132 struct oaktrail_clock_t clock; 194 struct gma_clock_t clock;
133 const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
134 int err = target; 195 int err = target;
135 196
136 memset(best_clock, 0, sizeof(*best_clock)); 197 memset(best_clock, 0, sizeof(*best_clock));
@@ -140,7 +201,7 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
140 clock.p1++) { 201 clock.p1++) {
141 int this_err; 202 int this_err;
142 203
143 oaktrail_clock(refclk, &clock); 204 mrst_lvds_clock(refclk, &clock);
144 205
145 this_err = abs(clock.dot - target); 206 this_err = abs(clock.dot - target);
146 if (this_err < err) { 207 if (this_err < err) {
@@ -149,7 +210,6 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
149 } 210 }
150 } 211 }
151 } 212 }
152 dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
153 return err != target; 213 return err != target;
154} 214}
155 215
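The SDVO search above scores each candidate by relative frequency error in units of 0.01% (min_error starts at 100000, well outside the representable range, and only an exact match, error 0, lets the outer loop terminate early). A minimal userspace sketch of that metric, with hypothetical values and an illustrative helper name:

#include <stdio.h>

/* Relative error in units of 0.01%, as computed in mrst_sdvo_find_best_pll():
 * positive when the synthesized clock overshoots the target, negative when it
 * undershoots. Integer math as in the driver; note targets above ~214000 kHz
 * would overflow 32-bit int here. Helper name is illustrative, not the driver's. */
static int freq_error(int target, int actual_freq)
{
	return 10000 - (target * 10000) / actual_freq;
}

int main(void)
{
	/* hypothetical kHz values: 100 MHz target, 99.5 MHz synthesized */
	printf("%d\n", freq_error(100000, 99500));	/* -50, i.e. -0.50% */
	return 0;
}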
@@ -167,8 +227,10 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
167 int pipe = gma_crtc->pipe; 227 int pipe = gma_crtc->pipe;
168 const struct psb_offset *map = &dev_priv->regmap[pipe]; 228 const struct psb_offset *map = &dev_priv->regmap[pipe];
169 u32 temp; 229 u32 temp;
230 int i;
231 int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
170 232
171 if (pipe == 1) { 233 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
172 oaktrail_crtc_hdmi_dpms(crtc, mode); 234 oaktrail_crtc_hdmi_dpms(crtc, mode);
173 return; 235 return;
174 } 236 }
@@ -183,35 +245,45 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
183 case DRM_MODE_DPMS_ON: 245 case DRM_MODE_DPMS_ON:
184 case DRM_MODE_DPMS_STANDBY: 246 case DRM_MODE_DPMS_STANDBY:
185 case DRM_MODE_DPMS_SUSPEND: 247 case DRM_MODE_DPMS_SUSPEND:
186 /* Enable the DPLL */ 248 for (i = 0; i <= need_aux; i++) {
187 temp = REG_READ(map->dpll); 249 /* Enable the DPLL */
188 if ((temp & DPLL_VCO_ENABLE) == 0) { 250 temp = REG_READ_WITH_AUX(map->dpll, i);
189 REG_WRITE(map->dpll, temp); 251 if ((temp & DPLL_VCO_ENABLE) == 0) {
190 REG_READ(map->dpll); 252 REG_WRITE_WITH_AUX(map->dpll, temp, i);
191 /* Wait for the clocks to stabilize. */ 253 REG_READ_WITH_AUX(map->dpll, i);
192 udelay(150); 254 /* Wait for the clocks to stabilize. */
193 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); 255 udelay(150);
194 REG_READ(map->dpll); 256 REG_WRITE_WITH_AUX(map->dpll,
195 /* Wait for the clocks to stabilize. */ 257 temp | DPLL_VCO_ENABLE, i);
196 udelay(150); 258 REG_READ_WITH_AUX(map->dpll, i);
197 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); 259 /* Wait for the clocks to stabilize. */
198 REG_READ(map->dpll); 260 udelay(150);
199 /* Wait for the clocks to stabilize. */ 261 REG_WRITE_WITH_AUX(map->dpll,
200 udelay(150); 262 temp | DPLL_VCO_ENABLE, i);
201 } 263 REG_READ_WITH_AUX(map->dpll, i);
202 /* Enable the pipe */ 264 /* Wait for the clocks to stabilize. */
203 temp = REG_READ(map->conf); 265 udelay(150);
204 if ((temp & PIPEACONF_ENABLE) == 0) 266 }
205 REG_WRITE(map->conf, temp | PIPEACONF_ENABLE); 267
206 /* Enable the plane */ 268 /* Enable the pipe */
207 temp = REG_READ(map->cntr); 269 temp = REG_READ_WITH_AUX(map->conf, i);
208 if ((temp & DISPLAY_PLANE_ENABLE) == 0) { 270 if ((temp & PIPEACONF_ENABLE) == 0) {
209 REG_WRITE(map->cntr, 271 REG_WRITE_WITH_AUX(map->conf,
210 temp | DISPLAY_PLANE_ENABLE); 272 temp | PIPEACONF_ENABLE, i);
211 /* Flush the plane changes */ 273 }
212 REG_WRITE(map->base, REG_READ(map->base)); 274
213 } 275 /* Enable the plane */
276 temp = REG_READ_WITH_AUX(map->cntr, i);
277 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
278 REG_WRITE_WITH_AUX(map->cntr,
279 temp | DISPLAY_PLANE_ENABLE,
280 i);
281 /* Flush the plane changes */
282 REG_WRITE_WITH_AUX(map->base,
283 REG_READ_WITH_AUX(map->base, i), i);
284 }
214 285
286 }
215 gma_crtc_load_lut(crtc); 287 gma_crtc_load_lut(crtc);
216 288
217 /* Give the overlay scaler a chance to enable 289 /* Give the overlay scaler a chance to enable
@@ -223,48 +295,52 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
223 * if it's on this pipe */ 295 * if it's on this pipe */
224 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */ 296 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
225 297
226 /* Disable the VGA plane that we never use */ 298 for (i = 0; i <= need_aux; i++) {
227 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); 299 /* Disable the VGA plane that we never use */
228 /* Disable display plane */ 300 REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
229 temp = REG_READ(map->cntr); 301 /* Disable display plane */
230 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 302 temp = REG_READ_WITH_AUX(map->cntr, i);
231 REG_WRITE(map->cntr, 303 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
232 temp & ~DISPLAY_PLANE_ENABLE); 304 REG_WRITE_WITH_AUX(map->cntr,
233 /* Flush the plane changes */ 305 temp & ~DISPLAY_PLANE_ENABLE, i);
234 REG_WRITE(map->base, REG_READ(map->base)); 306 /* Flush the plane changes */
235 REG_READ(map->base); 307 REG_WRITE_WITH_AUX(map->base,
236 } 308 REG_READ(map->base), i);
309 REG_READ_WITH_AUX(map->base, i);
310 }
237 311
238 /* Next, disable display pipes */ 312 /* Next, disable display pipes */
239 temp = REG_READ(map->conf); 313 temp = REG_READ_WITH_AUX(map->conf, i);
240 if ((temp & PIPEACONF_ENABLE) != 0) { 314 if ((temp & PIPEACONF_ENABLE) != 0) {
241 REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE); 315 REG_WRITE_WITH_AUX(map->conf,
242 REG_READ(map->conf); 316 temp & ~PIPEACONF_ENABLE, i);
243 } 317 REG_READ_WITH_AUX(map->conf, i);
244 /* Wait for the pipe disable to take effect. */ 318 }
245 gma_wait_for_vblank(dev); 319 /* Wait for the pipe disable to take effect. */
320 gma_wait_for_vblank(dev);
321
322 temp = REG_READ_WITH_AUX(map->dpll, i);
323 if ((temp & DPLL_VCO_ENABLE) != 0) {
324 REG_WRITE_WITH_AUX(map->dpll,
325 temp & ~DPLL_VCO_ENABLE, i);
326 REG_READ_WITH_AUX(map->dpll, i);
327 }
246 328
247 temp = REG_READ(map->dpll); 329 /* Wait for the clocks to turn off. */
248 if ((temp & DPLL_VCO_ENABLE) != 0) { 330 udelay(150);
249 REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
250 REG_READ(map->dpll);
251 } 331 }
252
253 /* Wait for the clocks to turn off. */
254 udelay(150);
255 break; 332 break;
256 } 333 }
257 334
258 /*Set FIFO Watermarks*/ 335 /* Set FIFO Watermarks (values taken from EMGD) */
259 REG_WRITE(DSPARB, 0x3FFF); 336 REG_WRITE(DSPARB, 0x3f80);
260 REG_WRITE(DSPFW1, 0x3F88080A); 337 REG_WRITE(DSPFW1, 0x3f8f0404);
261 REG_WRITE(DSPFW2, 0x0b060808); 338 REG_WRITE(DSPFW2, 0x04040f04);
262 REG_WRITE(DSPFW3, 0x0); 339 REG_WRITE(DSPFW3, 0x0);
263 REG_WRITE(DSPFW4, 0x08030404); 340 REG_WRITE(DSPFW4, 0x04040404);
264 REG_WRITE(DSPFW5, 0x04040404); 341 REG_WRITE(DSPFW5, 0x04040404);
265 REG_WRITE(DSPFW6, 0x78); 342 REG_WRITE(DSPFW6, 0x78);
266 REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000); 343 REG_WRITE(DSPCHICKENBIT, REG_READ(DSPCHICKENBIT) | 0xc040);
267 /* Must write Bit 14 of the Chicken Bit Register */
268 344
269 gma_power_end(dev); 345 gma_power_end(dev);
270} 346}
@@ -297,7 +373,8 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
297 int pipe = gma_crtc->pipe; 373 int pipe = gma_crtc->pipe;
298 const struct psb_offset *map = &dev_priv->regmap[pipe]; 374 const struct psb_offset *map = &dev_priv->regmap[pipe];
299 int refclk = 0; 375 int refclk = 0;
300 struct oaktrail_clock_t clock; 376 struct gma_clock_t clock;
377 const struct gma_limit_t *limit;
301 u32 dpll = 0, fp = 0, dspcntr, pipeconf; 378 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
302 bool ok, is_sdvo = false; 379 bool ok, is_sdvo = false;
303 bool is_lvds = false; 380 bool is_lvds = false;
@@ -306,8 +383,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
306 struct gma_encoder *gma_encoder = NULL; 383 struct gma_encoder *gma_encoder = NULL;
307 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; 384 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
308 struct drm_connector *connector; 385 struct drm_connector *connector;
386 int i;
387 int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
309 388
310 if (pipe == 1) 389 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
311 return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb); 390 return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
312 391
313 if (!gma_power_begin(dev, true)) 392 if (!gma_power_begin(dev, true))
@@ -340,15 +419,17 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
340 } 419 }
341 420
342 /* Disable the VGA plane that we never use */ 421 /* Disable the VGA plane that we never use */
343 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); 422 for (i = 0; i <= need_aux; i++)
423 REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
344 424
345 /* Disable the panel fitter if it was on our pipe */ 425 /* Disable the panel fitter if it was on our pipe */
346 if (oaktrail_panel_fitter_pipe(dev) == pipe) 426 if (oaktrail_panel_fitter_pipe(dev) == pipe)
347 REG_WRITE(PFIT_CONTROL, 0); 427 REG_WRITE(PFIT_CONTROL, 0);
348 428
349 REG_WRITE(map->src, 429 for (i = 0; i <= need_aux; i++) {
350 ((mode->crtc_hdisplay - 1) << 16) | 430 REG_WRITE_WITH_AUX(map->src, ((mode->crtc_hdisplay - 1) << 16) |
351 (mode->crtc_vdisplay - 1)); 431 (mode->crtc_vdisplay - 1), i);
432 }
352 433
353 if (gma_encoder) 434 if (gma_encoder)
354 drm_object_property_get_value(&connector->base, 435 drm_object_property_get_value(&connector->base,
@@ -365,35 +446,39 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
365 offsetY = (adjusted_mode->crtc_vdisplay - 446 offsetY = (adjusted_mode->crtc_vdisplay -
366 mode->crtc_vdisplay) / 2; 447 mode->crtc_vdisplay) / 2;
367 448
368 REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) | 449 for (i = 0; i <= need_aux; i++) {
369 ((adjusted_mode->crtc_htotal - 1) << 16)); 450 REG_WRITE_WITH_AUX(map->htotal, (mode->crtc_hdisplay - 1) |
370 REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) | 451 ((adjusted_mode->crtc_htotal - 1) << 16), i);
371 ((adjusted_mode->crtc_vtotal - 1) << 16)); 452 REG_WRITE_WITH_AUX(map->vtotal, (mode->crtc_vdisplay - 1) |
372 REG_WRITE(map->hblank, 453 ((adjusted_mode->crtc_vtotal - 1) << 16), i);
373 (adjusted_mode->crtc_hblank_start - offsetX - 1) | 454 REG_WRITE_WITH_AUX(map->hblank,
374 ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16)); 455 (adjusted_mode->crtc_hblank_start - offsetX - 1) |
375 REG_WRITE(map->hsync, 456 ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16), i);
376 (adjusted_mode->crtc_hsync_start - offsetX - 1) | 457 REG_WRITE_WITH_AUX(map->hsync,
377 ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16)); 458 (adjusted_mode->crtc_hsync_start - offsetX - 1) |
378 REG_WRITE(map->vblank, 459 ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16), i);
379 (adjusted_mode->crtc_vblank_start - offsetY - 1) | 460 REG_WRITE_WITH_AUX(map->vblank,
380 ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16)); 461 (adjusted_mode->crtc_vblank_start - offsetY - 1) |
381 REG_WRITE(map->vsync, 462 ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16), i);
382 (adjusted_mode->crtc_vsync_start - offsetY - 1) | 463 REG_WRITE_WITH_AUX(map->vsync,
383 ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16)); 464 (adjusted_mode->crtc_vsync_start - offsetY - 1) |
465 ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16), i);
466 }
384 } else { 467 } else {
385 REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) | 468 for (i = 0; i <= need_aux; i++) {
386 ((adjusted_mode->crtc_htotal - 1) << 16)); 469 REG_WRITE_WITH_AUX(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
387 REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) | 470 ((adjusted_mode->crtc_htotal - 1) << 16), i);
388 ((adjusted_mode->crtc_vtotal - 1) << 16)); 471 REG_WRITE_WITH_AUX(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
389 REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) | 472 ((adjusted_mode->crtc_vtotal - 1) << 16), i);
390 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 473 REG_WRITE_WITH_AUX(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
391 REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) | 474 ((adjusted_mode->crtc_hblank_end - 1) << 16), i);
392 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 475 REG_WRITE_WITH_AUX(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
393 REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) | 476 ((adjusted_mode->crtc_hsync_end - 1) << 16), i);
394 ((adjusted_mode->crtc_vblank_end - 1) << 16)); 477 REG_WRITE_WITH_AUX(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
395 REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) | 478 ((adjusted_mode->crtc_vblank_end - 1) << 16), i);
396 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 479 REG_WRITE_WITH_AUX(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
480 ((adjusted_mode->crtc_vsync_end - 1) << 16), i);
481 }
397 } 482 }
398 483
399 /* Flush the plane changes */ 484 /* Flush the plane changes */
@@ -418,21 +503,30 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
418 if (is_mipi) 503 if (is_mipi)
419 goto oaktrail_crtc_mode_set_exit; 504 goto oaktrail_crtc_mode_set_exit;
420 505
421 refclk = dev_priv->core_freq * 1000;
422 506
423 dpll = 0; /*BIT16 = 0 for 100MHz reference */ 507 dpll = 0; /*BIT16 = 0 for 100MHz reference */
424 508
425 ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock); 509 refclk = is_sdvo ? 96000 : dev_priv->core_freq * 1000;
510 limit = mrst_limit(crtc, refclk);
511 ok = limit->find_pll(limit, crtc, adjusted_mode->clock,
512 refclk, &clock);
426 513
427 if (!ok) { 514 if (is_sdvo) {
428 dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n"); 515 /* Convert calculated values to register values */
429 } else { 516 clock.p1 = (1L << (clock.p1 - 1));
430 dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d," 517 clock.m -= 2;
431 "m = %x, p1 = %x.\n", clock.dot, clock.m, 518 clock.n = (1L << (clock.n - 1));
432 clock.p1);
433 } 519 }
434 520
435 fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8; 521 if (!ok)
522 DRM_ERROR("Failed to find proper PLL settings");
523
524 mrst_print_pll(&clock);
525
526 if (is_sdvo)
527 fp = clock.n << 16 | clock.m;
528 else
529 fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
436 530
437 dpll |= DPLL_VGA_MODE_DIS; 531 dpll |= DPLL_VGA_MODE_DIS;
438 532
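On the SDVO path the raw divisors found by the search are converted to their hardware encodings before being programmed: p1 and n become one-hot fields, m is biased by -2, and fp packs n above m (fp = clock.n << 16 | clock.m, per the hunk above). A standalone sketch of that packing, with hypothetical divisor values:

#include <stdio.h>
#include <stdint.h>

/* Model of the SDVO divisor-to-register conversion: p1 and n become
 * one-hot encodings, m is stored biased by -2, and FP holds n in the
 * upper halfword above m (sketch of the packing done in the patch). */
int main(void)
{
	int m = 100, n = 4, p1 = 2;		/* hypothetical search results */
	uint32_t reg_p1 = 1u << (p1 - 1);	/* one-hot: 0b10 */
	uint32_t reg_m = m - 2;			/* biased */
	uint32_t reg_n = 1u << (n - 1);		/* one-hot: 0b1000 */
	uint32_t fp = reg_n << 16 | reg_m;

	printf("p1=%#x fp=%#x\n", reg_p1, fp);	/* p1=0x2 fp=0x80062 */
	return 0;
}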
@@ -456,38 +550,43 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
456 550
457 551
458 /* compute bitmask from p1 value */ 552 /* compute bitmask from p1 value */
459 dpll |= (1 << (clock.p1 - 2)) << 17; 553 if (is_sdvo)
554 dpll |= clock.p1 << 16; /* dpll |= (1 << (clock.p1 - 1)) << 16; */
555 else
556 dpll |= (1 << (clock.p1 - 2)) << 17;
460 557
461 dpll |= DPLL_VCO_ENABLE; 558 dpll |= DPLL_VCO_ENABLE;
462 559
463 mrstPrintPll("chosen", &clock);
464
465 if (dpll & DPLL_VCO_ENABLE) { 560 if (dpll & DPLL_VCO_ENABLE) {
466 REG_WRITE(map->fp0, fp); 561 for (i = 0; i <= need_aux; i++) {
467 REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE); 562 REG_WRITE_WITH_AUX(map->fp0, fp, i);
468 REG_READ(map->dpll); 563 REG_WRITE_WITH_AUX(map->dpll, dpll & ~DPLL_VCO_ENABLE, i);
469 /* Check the DPLLA lock bit PIPEACONF[29] */ 564 REG_READ_WITH_AUX(map->dpll, i);
470 udelay(150); 565 /* Check the DPLLA lock bit PIPEACONF[29] */
566 udelay(150);
567 }
471 } 568 }
472 569
473 REG_WRITE(map->fp0, fp); 570 for (i = 0; i <= need_aux; i++) {
474 REG_WRITE(map->dpll, dpll); 571 REG_WRITE_WITH_AUX(map->fp0, fp, i);
475 REG_READ(map->dpll); 572 REG_WRITE_WITH_AUX(map->dpll, dpll, i);
476 /* Wait for the clocks to stabilize. */ 573 REG_READ_WITH_AUX(map->dpll, i);
477 udelay(150); 574 /* Wait for the clocks to stabilize. */
575 udelay(150);
478 576
479 /* write it again -- the BIOS does, after all */ 577 /* write it again -- the BIOS does, after all */
480 REG_WRITE(map->dpll, dpll); 578 REG_WRITE_WITH_AUX(map->dpll, dpll, i);
481 REG_READ(map->dpll); 579 REG_READ_WITH_AUX(map->dpll, i);
482 /* Wait for the clocks to stabilize. */ 580 /* Wait for the clocks to stabilize. */
483 udelay(150); 581 udelay(150);
484 582
485 REG_WRITE(map->conf, pipeconf); 583 REG_WRITE_WITH_AUX(map->conf, pipeconf, i);
486 REG_READ(map->conf); 584 REG_READ_WITH_AUX(map->conf, i);
487 gma_wait_for_vblank(dev); 585 gma_wait_for_vblank(dev);
488 586
489 REG_WRITE(map->cntr, dspcntr); 587 REG_WRITE_WITH_AUX(map->cntr, dspcntr, i);
490 gma_wait_for_vblank(dev); 588 gma_wait_for_vblank(dev);
589 }
491 590
492oaktrail_crtc_mode_set_exit: 591oaktrail_crtc_mode_set_exit:
493 gma_power_end(dev); 592 gma_power_end(dev);
@@ -565,3 +664,9 @@ const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
565 .commit = gma_crtc_commit, 664 .commit = gma_crtc_commit,
566}; 665};
567 666
667/* Not used yet */
668const struct gma_clock_funcs mrst_clock_funcs = {
669 .clock = mrst_lvds_clock,
670 .limit = mrst_limit,
671 .pll_is_valid = gma_pll_is_valid,
672};
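The recurring pattern in the CRTC code above is a for (i = 0; i <= need_aux; i++) loop that replays each register access against the auxiliary VDC when the pipe drives SDVO. A userspace model of the idea, with two arrays standing in for the two MMIO banks (all names here are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>

static uint32_t vdc[256], aux[256];	/* stand-ins for the two MMIO ranges */

/* Selects the bank the way REG_WRITE_WITH_AUX() does via its aux argument */
static void reg_write_with_aux(uint32_t reg, uint32_t val, int use_aux)
{
	(use_aux ? aux : vdc)[reg] = val;
}

int main(void)
{
	int need_aux = 1;	/* as if gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) */
	int i;

	for (i = 0; i <= need_aux; i++)	/* i==0: primary VDC, i==1: aux VDC */
		reg_write_with_aux(0x10, 0x80000000u, i);

	printf("vdc=%#x aux=%#x\n", vdc[0x10], aux[0x10]);
	return 0;
}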
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 7a9ce000fd86..368a03ae3010 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -40,6 +40,9 @@ static int oaktrail_output_init(struct drm_device *dev)
40 dev_err(dev->dev, "DSI is not supported\n"); 40 dev_err(dev->dev, "DSI is not supported\n");
41 if (dev_priv->hdmi_priv) 41 if (dev_priv->hdmi_priv)
42 oaktrail_hdmi_init(dev, &dev_priv->mode_dev); 42 oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
43
44 psb_intel_sdvo_init(dev, SDVOB);
45
43 return 0; 46 return 0;
44} 47}
45 48
@@ -526,6 +529,7 @@ static int oaktrail_chip_setup(struct drm_device *dev)
526 psb_intel_opregion_init(dev); 529 psb_intel_opregion_init(dev);
527 psb_intel_init_bios(dev); 530 psb_intel_init_bios(dev);
528 } 531 }
532 gma_intel_setup_gmbus(dev);
529 oaktrail_hdmi_setup(dev); 533 oaktrail_hdmi_setup(dev);
530 return 0; 534 return 0;
531} 535}
@@ -534,6 +538,7 @@ static void oaktrail_teardown(struct drm_device *dev)
534{ 538{
535 struct drm_psb_private *dev_priv = dev->dev_private; 539 struct drm_psb_private *dev_priv = dev->dev_private;
536 540
541 gma_intel_teardown_gmbus(dev);
537 oaktrail_hdmi_teardown(dev); 542 oaktrail_hdmi_teardown(dev);
538 if (!dev_priv->has_gct) 543 if (!dev_priv->has_gct)
539 psb_intel_destroy_bios(dev); 544 psb_intel_destroy_bios(dev);
@@ -546,6 +551,7 @@ const struct psb_ops oaktrail_chip_ops = {
546 .crtcs = 2, 551 .crtcs = 2,
547 .hdmi_mask = (1 << 1), 552 .hdmi_mask = (1 << 1),
548 .lvds_mask = (1 << 0), 553 .lvds_mask = (1 << 0),
554 .sdvo_mask = (1 << 1),
549 .cursor_needs_phys = 0, 555 .cursor_needs_phys = 0,
550 .sgx_offset = MRST_SGX_OFFSET, 556 .sgx_offset = MRST_SGX_OFFSET,
551 557
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 3ece553311fe..5e0697862736 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -218,30 +218,6 @@ static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
218 .commit = oaktrail_lvds_commit, 218 .commit = oaktrail_lvds_commit,
219}; 219};
220 220
221static struct drm_display_mode lvds_configuration_modes[] = {
222 /* hard coded fixed mode for TPO LTPS LPJ040K001A */
223 { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
224 846, 1056, 0, 480, 489, 491, 525, 0, 0) },
225 /* hard coded fixed mode for LVDS 800x480 */
226 { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
227 802, 1024, 0, 480, 481, 482, 525, 0, 0) },
228 /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
229 { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
230 1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
231 /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
232 { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
233 1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
234 /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
235 { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
236 1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
237 /* hard coded fixed mode for LVDS 1024x768 */
238 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
239 1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
240 /* hard coded fixed mode for LVDS 1366x768 */
241 { DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
242 1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
243};
244
245/* Returns the panel fixed mode from configuration. */ 221/* Returns the panel fixed mode from configuration. */
246 222
247static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev, 223static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
@@ -303,10 +279,10 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
303 mode_dev->panel_fixed_mode = 279 mode_dev->panel_fixed_mode =
304 drm_mode_duplicate(dev, 280 drm_mode_duplicate(dev,
305 dev_priv->lfp_lvds_vbt_mode); 281 dev_priv->lfp_lvds_vbt_mode);
306 /* Then guess */ 282
283 /* If we still got no mode then bail */
307 if (mode_dev->panel_fixed_mode == NULL) 284 if (mode_dev->panel_fixed_mode == NULL)
308 mode_dev->panel_fixed_mode 285 return;
309 = drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
310 286
311 drm_mode_set_name(mode_dev->panel_fixed_mode); 287 drm_mode_set_name(mode_dev->panel_fixed_mode);
312 drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0); 288 drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 697678619bd1..23fb33f1471b 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -373,6 +373,7 @@ const struct psb_ops psb_chip_ops = {
373 .crtcs = 2, 373 .crtcs = 2,
374 .hdmi_mask = (1 << 0), 374 .hdmi_mask = (1 << 0),
375 .lvds_mask = (1 << 1), 375 .lvds_mask = (1 << 1),
376 .sdvo_mask = (1 << 0),
376 .cursor_needs_phys = 1, 377 .cursor_needs_phys = 1,
377 .sgx_offset = PSB_SGX_OFFSET, 378 .sgx_offset = PSB_SGX_OFFSET,
378 .chip_setup = psb_chip_setup, 379 .chip_setup = psb_chip_setup,
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index fcb4e9ff1f20..1199180667c9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -251,6 +251,12 @@ static int psb_driver_unload(struct drm_device *dev)
251 iounmap(dev_priv->sgx_reg); 251 iounmap(dev_priv->sgx_reg);
252 dev_priv->sgx_reg = NULL; 252 dev_priv->sgx_reg = NULL;
253 } 253 }
254 if (dev_priv->aux_reg) {
255 iounmap(dev_priv->aux_reg);
256 dev_priv->aux_reg = NULL;
257 }
258 if (dev_priv->aux_pdev)
259 pci_dev_put(dev_priv->aux_pdev);
254 260
255 /* Destroy VBT data */ 261 /* Destroy VBT data */
256 psb_intel_destroy_bios(dev); 262 psb_intel_destroy_bios(dev);
@@ -266,7 +272,7 @@ static int psb_driver_unload(struct drm_device *dev)
266static int psb_driver_load(struct drm_device *dev, unsigned long chipset) 272static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
267{ 273{
268 struct drm_psb_private *dev_priv; 274 struct drm_psb_private *dev_priv;
269 unsigned long resource_start; 275 unsigned long resource_start, resource_len;
270 unsigned long irqflags; 276 unsigned long irqflags;
271 int ret = -ENOMEM; 277 int ret = -ENOMEM;
272 struct drm_connector *connector; 278 struct drm_connector *connector;
@@ -296,6 +302,30 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
296 if (!dev_priv->sgx_reg) 302 if (!dev_priv->sgx_reg)
297 goto out_err; 303 goto out_err;
298 304
305 if (IS_MRST(dev)) {
306 dev_priv->aux_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0));
307
308 if (dev_priv->aux_pdev) {
309 resource_start = pci_resource_start(dev_priv->aux_pdev,
310 PSB_AUX_RESOURCE);
311 resource_len = pci_resource_len(dev_priv->aux_pdev,
312 PSB_AUX_RESOURCE);
313 dev_priv->aux_reg = ioremap_nocache(resource_start,
314 resource_len);
315 if (!dev_priv->aux_reg)
316 goto out_err;
317
318 DRM_DEBUG_KMS("Found aux vdc");
319 } else {
320 /* Couldn't find the aux vdc so map to primary vdc */
321 dev_priv->aux_reg = dev_priv->vdc_reg;
322 DRM_DEBUG_KMS("Couldn't find aux pci device");
323 }
324 dev_priv->gmbus_reg = dev_priv->aux_reg;
325 } else {
326 dev_priv->gmbus_reg = dev_priv->vdc_reg;
327 }
328
299 psb_intel_opregion_setup(dev); 329 psb_intel_opregion_setup(dev);
300 330
301 ret = dev_priv->ops->chip_setup(dev); 331 ret = dev_priv->ops->chip_setup(dev);
@@ -359,7 +389,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
359 389
360 drm_irq_install(dev); 390 drm_irq_install(dev);
361 391
362 dev->vblank_disable_allowed = 1; 392 dev->vblank_disable_allowed = true;
363 393
364 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 394 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
365 395
@@ -449,7 +479,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
449 obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR); 479 obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
450 if (!obj) { 480 if (!obj) {
451 dev_dbg(dev->dev, "Invalid Connector object.\n"); 481 dev_dbg(dev->dev, "Invalid Connector object.\n");
452 return -EINVAL; 482 return -ENOENT;
453 } 483 }
454 484
455 connector = obj_to_connector(obj); 485 connector = obj_to_connector(obj);
@@ -491,7 +521,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
491 obj = drm_mode_object_find(dev, obj_id, 521 obj = drm_mode_object_find(dev, obj_id,
492 DRM_MODE_OBJECT_CONNECTOR); 522 DRM_MODE_OBJECT_CONNECTOR);
493 if (!obj) { 523 if (!obj) {
494 ret = -EINVAL; 524 ret = -ENOENT;
495 goto mode_op_out; 525 goto mode_op_out;
496 } 526 }
497 527
@@ -646,7 +676,6 @@ static struct drm_driver driver = {
646 .preclose = psb_driver_preclose, 676 .preclose = psb_driver_preclose,
647 .postclose = psb_driver_close, 677 .postclose = psb_driver_close,
648 678
649 .gem_init_object = psb_gem_init_object,
650 .gem_free_object = psb_gem_free_object, 679 .gem_free_object = psb_gem_free_object,
651 .gem_vm_ops = &psb_gem_vm_ops, 680 .gem_vm_ops = &psb_gem_vm_ops,
652 .dumb_create = psb_gem_dumb_create, 681 .dumb_create = psb_gem_dumb_create,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 4535ac7708f8..b59e6588c343 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -44,10 +44,10 @@ enum {
44 CHIP_MFLD_0130 = 3, /* Medfield */ 44 CHIP_MFLD_0130 = 3, /* Medfield */
45}; 45};
46 46
47#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108) 47#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
48#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100) 48#define IS_MRST(dev) (((dev)->pdev->device & 0xfff0) == 0x4100)
49#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130) 49#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
50#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0) 50#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
51 51
52/* 52/*
53 * Driver definitions 53 * Driver definitions
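IS_MRST() above switches from dev->pci_device to dev->pdev->device and also widens its mask from 0xfffc to 0xfff0, so the whole 0x4100-0x410f device-ID range now matches. A quick check of the widened predicate:

#include <stdio.h>
#include <stdint.h>

static int is_mrst(uint16_t device)
{
	return (device & 0xfff0) == 0x4100;	/* widened mask from the patch */
}

int main(void)
{
	/* 0x4108 is matched only by the wider mask; 0x4110 still is not */
	printf("%d %d %d\n", is_mrst(0x4102), is_mrst(0x4108), is_mrst(0x4110));
	return 0;	/* prints: 1 1 0 */
}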
@@ -75,6 +75,7 @@ enum {
75 * PCI resource identifiers 75 * PCI resource identifiers
76 */ 76 */
77#define PSB_MMIO_RESOURCE 0 77#define PSB_MMIO_RESOURCE 0
78#define PSB_AUX_RESOURCE 0
78#define PSB_GATT_RESOURCE 2 79#define PSB_GATT_RESOURCE 2
79#define PSB_GTT_RESOURCE 3 80#define PSB_GTT_RESOURCE 3
80/* 81/*
@@ -455,6 +456,7 @@ struct psb_ops;
455 456
456struct drm_psb_private { 457struct drm_psb_private {
457 struct drm_device *dev; 458 struct drm_device *dev;
459 struct pci_dev *aux_pdev; /* Currently only used by mrst */
458 const struct psb_ops *ops; 460 const struct psb_ops *ops;
459 const struct psb_offset *regmap; 461 const struct psb_offset *regmap;
460 462
@@ -486,6 +488,7 @@ struct drm_psb_private {
486 488
487 uint8_t __iomem *sgx_reg; 489 uint8_t __iomem *sgx_reg;
488 uint8_t __iomem *vdc_reg; 490 uint8_t __iomem *vdc_reg;
491 uint8_t __iomem *aux_reg; /* Auxiliary vdc pipe regs */
489 uint32_t gatt_free_offset; 492 uint32_t gatt_free_offset;
490 493
491 /* 494 /*
@@ -532,6 +535,7 @@ struct drm_psb_private {
532 535
533 /* gmbus */ 536 /* gmbus */
534 struct intel_gmbus *gmbus; 537 struct intel_gmbus *gmbus;
538 uint8_t __iomem *gmbus_reg;
535 539
536 /* Used by SDVO */ 540 /* Used by SDVO */
537 int crt_ddc_pin; 541 int crt_ddc_pin;
@@ -672,6 +676,7 @@ struct psb_ops {
672 int sgx_offset; /* Base offset of SGX device */ 676 int sgx_offset; /* Base offset of SGX device */
673 int hdmi_mask; /* Mask of HDMI CRTCs */ 677 int hdmi_mask; /* Mask of HDMI CRTCs */
674 int lvds_mask; /* Mask of LVDS CRTCs */ 678 int lvds_mask; /* Mask of LVDS CRTCs */
679 int sdvo_mask; /* Mask of SDVO CRTCs */
675 int cursor_needs_phys; /* If cursor base reg need physical address */ 680 int cursor_needs_phys; /* If cursor base reg need physical address */
676 681
677 /* Sub functions */ 682 /* Sub functions */
@@ -837,7 +842,6 @@ extern const struct drm_connector_helper_funcs
837extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs; 842extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
838 843
839/* gem.c */ 844/* gem.c */
840extern int psb_gem_init_object(struct drm_gem_object *obj);
841extern void psb_gem_free_object(struct drm_gem_object *obj); 845extern void psb_gem_free_object(struct drm_gem_object *obj);
842extern int psb_gem_get_aperture(struct drm_device *dev, void *data, 846extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
843 struct drm_file *file); 847 struct drm_file *file);
@@ -928,16 +932,58 @@ static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
928 return ioread32(dev_priv->vdc_reg + reg); 932 return ioread32(dev_priv->vdc_reg + reg);
929} 933}
930 934
935static inline uint32_t REGISTER_READ_AUX(struct drm_device *dev, uint32_t reg)
936{
937 struct drm_psb_private *dev_priv = dev->dev_private;
938 return ioread32(dev_priv->aux_reg + reg);
939}
940
931#define REG_READ(reg) REGISTER_READ(dev, (reg)) 941#define REG_READ(reg) REGISTER_READ(dev, (reg))
942#define REG_READ_AUX(reg) REGISTER_READ_AUX(dev, (reg))
943
944/* Useful for post reads */
945static inline uint32_t REGISTER_READ_WITH_AUX(struct drm_device *dev,
946 uint32_t reg, int aux)
947{
948 uint32_t val;
949
950 if (aux)
951 val = REG_READ_AUX(reg);
952 else
953 val = REG_READ(reg);
954
955 return val;
956}
957
958#define REG_READ_WITH_AUX(reg, aux) REGISTER_READ_WITH_AUX(dev, (reg), (aux))
932 959
933static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg, 960static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
934 uint32_t val) 961 uint32_t val)
935{ 962{
936 struct drm_psb_private *dev_priv = dev->dev_private; 963 struct drm_psb_private *dev_priv = dev->dev_private;
937 iowrite32((val), dev_priv->vdc_reg + (reg)); 964 iowrite32((val), dev_priv->vdc_reg + (reg));
938} 965}
939 966
967static inline void REGISTER_WRITE_AUX(struct drm_device *dev, uint32_t reg,
968 uint32_t val)
969{
970 struct drm_psb_private *dev_priv = dev->dev_private;
971 iowrite32((val), dev_priv->aux_reg + (reg));
972}
973
940#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val)) 974#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
975#define REG_WRITE_AUX(reg, val) REGISTER_WRITE_AUX(dev, (reg), (val))
976
977static inline void REGISTER_WRITE_WITH_AUX(struct drm_device *dev, uint32_t reg,
978 uint32_t val, int aux)
979{
980 if (aux)
981 REG_WRITE_AUX(reg, val);
982 else
983 REG_WRITE(reg, val);
984}
985
986#define REG_WRITE_WITH_AUX(reg, val, aux) REGISTER_WRITE_WITH_AUX(dev, (reg), (val), (aux))
941 987
942static inline void REGISTER_WRITE16(struct drm_device *dev, 988static inline void REGISTER_WRITE16(struct drm_device *dev,
943 uint32_t reg, uint32_t val) 989 uint32_t reg, uint32_t val)
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 97f8a03fee43..c8841ac6c8f1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -572,7 +572,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
572 572
573 if (!drmmode_obj) { 573 if (!drmmode_obj) {
574 dev_err(dev->dev, "no such CRTC id\n"); 574 dev_err(dev->dev, "no such CRTC id\n");
575 return -EINVAL; 575 return -ENOENT;
576 } 576 }
577 577
578 crtc = to_gma_crtc(obj_to_crtc(drmmode_obj)); 578 crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 6f01cdf5e125..07d3a9e6d79b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -228,24 +228,26 @@ static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u3
228{ 228{
229 struct drm_device *dev = psb_intel_sdvo->base.base.dev; 229 struct drm_device *dev = psb_intel_sdvo->base.base.dev;
230 u32 bval = val, cval = val; 230 u32 bval = val, cval = val;
231 int i; 231 int i, j;
232 int need_aux = IS_MRST(dev) ? 1 : 0;
232 233
233 if (psb_intel_sdvo->sdvo_reg == SDVOB) { 234 for (j = 0; j <= need_aux; j++) {
234 cval = REG_READ(SDVOC); 235 if (psb_intel_sdvo->sdvo_reg == SDVOB)
235 } else { 236 cval = REG_READ_WITH_AUX(SDVOC, j);
236 bval = REG_READ(SDVOB); 237 else
237 } 238 bval = REG_READ_WITH_AUX(SDVOB, j);
238 /* 239
239 * Write the registers twice for luck. Sometimes, 240 /*
240 * writing them only once doesn't appear to 'stick'. 241 * Write the registers twice for luck. Sometimes,
241 * The BIOS does this too. Yay, magic 242 * writing them only once doesn't appear to 'stick'.
242 */ 243 * The BIOS does this too. Yay, magic
243 for (i = 0; i < 2; i++) 244 */
244 { 245 for (i = 0; i < 2; i++) {
245 REG_WRITE(SDVOB, bval); 246 REG_WRITE_WITH_AUX(SDVOB, bval, j);
246 REG_READ(SDVOB); 247 REG_READ_WITH_AUX(SDVOB, j);
247 REG_WRITE(SDVOC, cval); 248 REG_WRITE_WITH_AUX(SDVOC, cval, j);
248 REG_READ(SDVOC); 249 REG_READ_WITH_AUX(SDVOC, j);
250 }
249 } 251 }
250} 252}
251 253
@@ -995,6 +997,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
995 struct psb_intel_sdvo_dtd input_dtd; 997 struct psb_intel_sdvo_dtd input_dtd;
996 int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode); 998 int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
997 int rate; 999 int rate;
1000 int need_aux = IS_MRST(dev) ? 1 : 0;
998 1001
999 if (!mode) 1002 if (!mode)
1000 return; 1003 return;
@@ -1060,7 +1063,11 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
1060 return; 1063 return;
1061 1064
1062 /* Set the SDVO control regs. */ 1065 /* Set the SDVO control regs. */
1063 sdvox = REG_READ(psb_intel_sdvo->sdvo_reg); 1066 if (need_aux)
1067 sdvox = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
1068 else
1069 sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
1070
1064 switch (psb_intel_sdvo->sdvo_reg) { 1071 switch (psb_intel_sdvo->sdvo_reg) {
1065 case SDVOB: 1072 case SDVOB:
1066 sdvox &= SDVOB_PRESERVE_MASK; 1073 sdvox &= SDVOB_PRESERVE_MASK;
@@ -1090,6 +1097,8 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1090 struct drm_device *dev = encoder->dev; 1097 struct drm_device *dev = encoder->dev;
1091 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder); 1098 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
1092 u32 temp; 1099 u32 temp;
1100 int i;
1101 int need_aux = IS_MRST(dev) ? 1 : 0;
1093 1102
1094 switch (mode) { 1103 switch (mode) {
1095 case DRM_MODE_DPMS_ON: 1104 case DRM_MODE_DPMS_ON:
@@ -1108,19 +1117,27 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1108 psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode); 1117 psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
1109 1118
1110 if (mode == DRM_MODE_DPMS_OFF) { 1119 if (mode == DRM_MODE_DPMS_OFF) {
1111 temp = REG_READ(psb_intel_sdvo->sdvo_reg); 1120 if (need_aux)
1121 temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
1122 else
1123 temp = REG_READ(psb_intel_sdvo->sdvo_reg);
1124
1112 if ((temp & SDVO_ENABLE) != 0) { 1125 if ((temp & SDVO_ENABLE) != 0) {
1113 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE); 1126 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
1114 } 1127 }
1115 } 1128 }
1116 } else { 1129 } else {
1117 bool input1, input2; 1130 bool input1, input2;
1118 int i;
1119 u8 status; 1131 u8 status;
1120 1132
1121 temp = REG_READ(psb_intel_sdvo->sdvo_reg); 1133 if (need_aux)
1134 temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
1135 else
1136 temp = REG_READ(psb_intel_sdvo->sdvo_reg);
1137
1122 if ((temp & SDVO_ENABLE) == 0) 1138 if ((temp & SDVO_ENABLE) == 0)
1123 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE); 1139 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
1140
1124 for (i = 0; i < 2; i++) 1141 for (i = 0; i < 2; i++)
1125 gma_wait_for_vblank(dev); 1142 gma_wait_for_vblank(dev);
1126 1143
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 029eccf30137..ba4830342d34 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -271,15 +271,15 @@ void psb_irq_preinstall(struct drm_device *dev)
271 271
272 if (gma_power_is_on(dev)) 272 if (gma_power_is_on(dev))
273 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 273 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
274 if (dev->vblank_enabled[0]) 274 if (dev->vblank[0].enabled)
275 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG; 275 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
276 if (dev->vblank_enabled[1]) 276 if (dev->vblank[1].enabled)
277 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG; 277 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
278 278
279 /* FIXME: Handle Medfield irq mask 279 /* FIXME: Handle Medfield irq mask
280 if (dev->vblank_enabled[1]) 280 if (dev->vblank[1].enabled)
281 dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG; 281 dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
282 if (dev->vblank_enabled[2]) 282 if (dev->vblank[2].enabled)
283 dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG; 283 dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
284 */ 284 */
285 285
@@ -305,17 +305,17 @@ int psb_irq_postinstall(struct drm_device *dev)
305 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); 305 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
306 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 306 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
307 307
308 if (dev->vblank_enabled[0]) 308 if (dev->vblank[0].enabled)
309 psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); 309 psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
310 else 310 else
311 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); 311 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
312 312
313 if (dev->vblank_enabled[1]) 313 if (dev->vblank[1].enabled)
314 psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); 314 psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
315 else 315 else
316 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); 316 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
317 317
318 if (dev->vblank_enabled[2]) 318 if (dev->vblank[2].enabled)
319 psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); 319 psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
320 else 320 else
321 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); 321 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
@@ -339,13 +339,13 @@ void psb_irq_uninstall(struct drm_device *dev)
339 339
340 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 340 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
341 341
342 if (dev->vblank_enabled[0]) 342 if (dev->vblank[0].enabled)
343 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); 343 psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
344 344
345 if (dev->vblank_enabled[1]) 345 if (dev->vblank[1].enabled)
346 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); 346 psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
347 347
348 if (dev->vblank_enabled[2]) 348 if (dev->vblank[2].enabled)
349 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); 349 psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
350 350
351 dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG | 351 dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
@@ -456,7 +456,7 @@ static int psb_vblank_do_wait(struct drm_device *dev,
456{ 456{
457 unsigned int cur_vblank; 457 unsigned int cur_vblank;
458 int ret = 0; 458 int ret = 0;
459 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 459 DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
460 (((cur_vblank = atomic_read(counter)) 460 (((cur_vblank = atomic_read(counter))
461 - *sequence) <= (1 << 23))); 461 - *sequence) <= (1 << 23)));
462 *sequence = cur_vblank; 462 *sequence = cur_vblank;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 60e84043aa34..400b0c4a10fb 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -17,6 +17,7 @@
17 17
18 18
19 19
20#include <linux/hdmi.h>
20#include <linux/module.h> 21#include <linux/module.h>
21 22
22#include <drm/drmP.h> 23#include <drm/drmP.h>
@@ -549,6 +550,8 @@ tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
549 buf[HB(0)] = 0x82; 550 buf[HB(0)] = 0x82;
550 buf[HB(1)] = 0x02; 551 buf[HB(1)] = 0x02;
551 buf[HB(2)] = 13; 552 buf[HB(2)] = 13;
553 buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN;
554 buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2;
552 buf[PB(4)] = drm_match_cea_mode(mode); 555 buf[PB(4)] = drm_match_cea_mode(mode);
553 556
554 tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf, 557 tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
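In the AVI infoframe bytes above, PB(1) carries the scan-mode field in its low two bits and PB(3) carries the quantization-range field in bits 3:2, hence the shift by two applied to HDMI_QUANTIZATION_RANGE_FULL. A minimal sketch of the PB3 encoding (enum values mirror include/linux/hdmi.h):

#include <stdio.h>
#include <stdint.h>

/* Values as in include/linux/hdmi.h */
enum hdmi_quantization_range {
	HDMI_QUANTIZATION_RANGE_DEFAULT,
	HDMI_QUANTIZATION_RANGE_LIMITED,
	HDMI_QUANTIZATION_RANGE_FULL,
};

int main(void)
{
	uint8_t pb3 = HDMI_QUANTIZATION_RANGE_FULL << 2;	/* Q bits live at 3:2 */
	printf("PB3 = %#x\n", pb3);	/* 0x8: full-range RGB signalled */
	return 0;
}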
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ab1892eb1074..249fdff305c6 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -944,8 +944,6 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
944 dma->buflist[vertex->idx], 944 dma->buflist[vertex->idx],
945 vertex->discard, vertex->used); 945 vertex->discard, vertex->used);
946 946
947 atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
948 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
949 sarea_priv->last_enqueue = dev_priv->counter - 1; 947 sarea_priv->last_enqueue = dev_priv->counter - 1;
950 sarea_priv->last_dispatch = (int)hw_status[5]; 948 sarea_priv->last_dispatch = (int)hw_status[5];
951 949
@@ -1105,8 +1103,6 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
1105 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, 1103 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
1106 mc->last_render); 1104 mc->last_render);
1107 1105
1108 atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
1109 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1110 sarea_priv->last_enqueue = dev_priv->counter - 1; 1106 sarea_priv->last_enqueue = dev_priv->counter - 1;
1111 sarea_priv->last_dispatch = (int)hw_status[5]; 1107 sarea_priv->last_dispatch = (int)hw_status[5];
1112 1108
@@ -1197,13 +1193,6 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
1197 1193
1198int i810_driver_load(struct drm_device *dev, unsigned long flags) 1194int i810_driver_load(struct drm_device *dev, unsigned long flags)
1199{ 1195{
1200 /* i810 has 4 more counters */
1201 dev->counters += 4;
1202 dev->types[6] = _DRM_STAT_IRQ;
1203 dev->types[7] = _DRM_STAT_PRIMARY;
1204 dev->types[8] = _DRM_STAT_SECONDARY;
1205 dev->types[9] = _DRM_STAT_DMA;
1206
1207 pci_set_master(dev->pdev); 1196 pci_set_master(dev->pdev);
1208 1197
1209 return 0; 1198 return 0;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
new file mode 100644
index 000000000000..6199d0b5b958
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -0,0 +1,67 @@
1config DRM_I915
2 tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
3 depends on DRM
4 depends on AGP
5 depends on AGP_INTEL
6 # we need shmfs for the swappable backing store, and in particular
7 # the shmem_readpage() which depends upon tmpfs
8 select SHMEM
9 select TMPFS
10 select DRM_KMS_HELPER
11 # i915 depends on ACPI_VIDEO when ACPI is enabled
12 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
13 select BACKLIGHT_LCD_SUPPORT if ACPI
14 select BACKLIGHT_CLASS_DEVICE if ACPI
15 select VIDEO_OUTPUT_CONTROL if ACPI
16 select INPUT if ACPI
17 select ACPI_VIDEO if ACPI
18 select ACPI_BUTTON if ACPI
19 help
20 Choose this option if you have a system that has "Intel Graphics
21 Media Accelerator" or "HD Graphics" integrated graphics,
22 including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
23 G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
24 Core i5, Core i7 as well as Atom CPUs with integrated graphics.
25 If M is selected, the module will be called i915. AGP support
26 is required for this driver to work. This driver is used by
27 the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
28 replaces the older i830 module that supported a subset of the
29 hardware in older X.org releases.
30
31 Note that the older i810/i815 chipsets require the use of the
32 i810 driver instead, and the Atom z5xx series has an entirely
33 different implementation.
34
35config DRM_I915_KMS
36 bool "Enable modesetting on intel by default"
37 depends on DRM_I915
38 help
39 Choose this option if you want kernel modesetting enabled by default,
40 and you have a new enough userspace to support this. Running old
41 userspaces with this enabled will cause pain. Note that this causes
42 the driver to bind to PCI devices, which precludes loading things
43 like intelfb.
44
45config DRM_I915_FBDEV
46 bool "Enable legacy fbdev support for the modesettting intel driver"
47 depends on DRM_I915
48 select DRM_KMS_FB_HELPER
49 select FB_CFB_FILLRECT
50 select FB_CFB_COPYAREA
51 select FB_CFB_IMAGEBLIT
52 default y
53 help
54 Choose this option if you have a need for the legacy fbdev
55 support. Note that this support also provides the Linux console
56 support on top of the intel modesetting driver.
57
58config DRM_I915_PRELIMINARY_HW_SUPPORT
59 bool "Enable preliminary support for prerelease Intel hardware by default"
60 depends on DRM_I915
61 help
62 Choose this option if you have prerelease Intel hardware and want the
63 i915 driver to support it by default. You can enable such support at
64 runtime with the module option i915.preliminary_hw_support=1; this
65 option changes the default for that module option.
66
67 If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b8449a84a0dc..41838eaa799c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -21,6 +21,9 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
21 intel_display.o \ 21 intel_display.o \
22 intel_crt.o \ 22 intel_crt.o \
23 intel_lvds.o \ 23 intel_lvds.o \
24 intel_dsi.o \
25 intel_dsi_cmd.o \
26 intel_dsi_pll.o \
24 intel_bios.o \ 27 intel_bios.o \
25 intel_ddi.o \ 28 intel_ddi.o \
26 intel_dp.o \ 29 intel_dp.o \
@@ -30,7 +33,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
30 intel_panel.o \ 33 intel_panel.o \
31 intel_pm.o \ 34 intel_pm.o \
32 intel_i2c.o \ 35 intel_i2c.o \
33 intel_fb.o \
34 intel_tv.o \ 36 intel_tv.o \
35 intel_dvo.o \ 37 intel_dvo.o \
36 intel_ringbuffer.o \ 38 intel_ringbuffer.o \
@@ -51,6 +53,8 @@ i915-$(CONFIG_COMPAT) += i915_ioc32.o
51 53
52i915-$(CONFIG_ACPI) += intel_acpi.o 54i915-$(CONFIG_ACPI) += intel_acpi.o
53 55
56i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
57
54obj-$(CONFIG_DRM_I915) += i915.o 58obj-$(CONFIG_DRM_I915) += i915.o
55 59
56CFLAGS_i915_trace_points.o := -I$(src) 60CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 33a62ad80100..312163379db9 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -77,17 +77,6 @@ struct intel_dvo_dev_ops {
77 struct drm_display_mode *mode); 77 struct drm_display_mode *mode);
78 78
79 /* 79 /*
80 * Callback to adjust the mode to be set in the CRTC.
81 *
82 * This allows an output to adjust the clock or even the entire set of
83 * timings, which is used for panels with fixed timings or for
84 * buses with clock limitations.
85 */
86 bool (*mode_fixup)(struct intel_dvo_device *dvo,
87 const struct drm_display_mode *mode,
88 struct drm_display_mode *adjusted_mode);
89
90 /*
91 * Callback for preparing mode changes on an output 80 * Callback for preparing mode changes on an output
92 */ 81 */
93 void (*prepare)(struct intel_dvo_device *dvo); 82 void (*prepare)(struct intel_dvo_device *dvo);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a6f4cb5af185..6ed45a984230 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,8 @@
27 */ 27 */
28 28
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/circ_buf.h>
31#include <linux/ctype.h>
30#include <linux/debugfs.h> 32#include <linux/debugfs.h>
31#include <linux/slab.h> 33#include <linux/slab.h>
32#include <linux/export.h> 34#include <linux/export.h>
@@ -38,9 +40,6 @@
38#include <drm/i915_drm.h> 40#include <drm/i915_drm.h>
39#include "i915_drv.h" 41#include "i915_drv.h"
40 42
41#define DRM_I915_RING_DEBUG 1
42
43
44#if defined(CONFIG_DEBUG_FS) 43#if defined(CONFIG_DEBUG_FS)
45 44
46enum { 45enum {
@@ -54,6 +53,32 @@ static const char *yesno(int v)
54 return v ? "yes" : "no"; 53 return v ? "yes" : "no";
55} 54}
56 55
56/* As the drm_debugfs_init() routines are called before dev->dev_private is
57 * allocated we need to hook into the minor for release. */
58static int
59drm_add_fake_info_node(struct drm_minor *minor,
60 struct dentry *ent,
61 const void *key)
62{
63 struct drm_info_node *node;
64
65 node = kmalloc(sizeof(*node), GFP_KERNEL);
66 if (node == NULL) {
67 debugfs_remove(ent);
68 return -ENOMEM;
69 }
70
71 node->minor = minor;
72 node->dent = ent;
73 node->info_ent = (void *) key;
74
75 mutex_lock(&minor->debugfs_lock);
76 list_add(&node->list, &minor->debugfs_list);
77 mutex_unlock(&minor->debugfs_lock);
78
79 return 0;
80}
81
57static int i915_capabilities(struct seq_file *m, void *data) 82static int i915_capabilities(struct seq_file *m, void *data)
58{ 83{
59 struct drm_info_node *node = (struct drm_info_node *) m->private; 84 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -145,6 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
145 seq_printf(m, " (%s)", obj->ring->name); 170 seq_printf(m, " (%s)", obj->ring->name);
146} 171}
147 172
173static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
174{
175 seq_putc(m, ctx->is_initialized ? 'I' : 'i');
176 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
177 seq_putc(m, ' ');
178}
179
148static int i915_gem_object_list_info(struct seq_file *m, void *data) 180static int i915_gem_object_list_info(struct seq_file *m, void *data)
149{ 181{
150 struct drm_info_node *node = (struct drm_info_node *) m->private; 182 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -554,7 +586,53 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
554 if (ret) 586 if (ret)
555 return ret; 587 return ret;
556 588
557 if (IS_VALLEYVIEW(dev)) { 589 if (INTEL_INFO(dev)->gen >= 8) {
590 int i;
591 seq_printf(m, "Master Interrupt Control:\t%08x\n",
592 I915_READ(GEN8_MASTER_IRQ));
593
594 for (i = 0; i < 4; i++) {
595 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
596 i, I915_READ(GEN8_GT_IMR(i)));
597 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
598 i, I915_READ(GEN8_GT_IIR(i)));
599 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
600 i, I915_READ(GEN8_GT_IER(i)));
601 }
602
603 for_each_pipe(i) {
604 seq_printf(m, "Pipe %c IMR:\t%08x\n",
605 pipe_name(i),
606 I915_READ(GEN8_DE_PIPE_IMR(i)));
607 seq_printf(m, "Pipe %c IIR:\t%08x\n",
608 pipe_name(i),
609 I915_READ(GEN8_DE_PIPE_IIR(i)));
610 seq_printf(m, "Pipe %c IER:\t%08x\n",
611 pipe_name(i),
612 I915_READ(GEN8_DE_PIPE_IER(i)));
613 }
614
615 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
616 I915_READ(GEN8_DE_PORT_IMR));
617 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
618 I915_READ(GEN8_DE_PORT_IIR));
619 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
620 I915_READ(GEN8_DE_PORT_IER));
621
622 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
623 I915_READ(GEN8_DE_MISC_IMR));
624 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
625 I915_READ(GEN8_DE_MISC_IIR));
626 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
627 I915_READ(GEN8_DE_MISC_IER));
628
629 seq_printf(m, "PCU interrupt mask:\t%08x\n",
630 I915_READ(GEN8_PCU_IMR));
631 seq_printf(m, "PCU interrupt identity:\t%08x\n",
632 I915_READ(GEN8_PCU_IIR));
633 seq_printf(m, "PCU interrupt enable:\t%08x\n",
634 I915_READ(GEN8_PCU_IER));
635 } else if (IS_VALLEYVIEW(dev)) {
558 seq_printf(m, "Display IER:\t%08x\n", 636 seq_printf(m, "Display IER:\t%08x\n",
559 I915_READ(VLV_IER)); 637 I915_READ(VLV_IER));
560 seq_printf(m, "Display IIR:\t%08x\n", 638 seq_printf(m, "Display IIR:\t%08x\n",
@@ -626,7 +704,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
626 seq_printf(m, "Interrupts received: %d\n", 704 seq_printf(m, "Interrupts received: %d\n",
627 atomic_read(&dev_priv->irq_received)); 705 atomic_read(&dev_priv->irq_received));
628 for_each_ring(ring, dev_priv, i) { 706 for_each_ring(ring, dev_priv, i) {
629 if (IS_GEN6(dev) || IS_GEN7(dev)) { 707 if (INTEL_INFO(dev)->gen >= 6) {
630 seq_printf(m, 708 seq_printf(m,
631 "Graphics Interrupt mask (%s): %08x\n", 709 "Graphics Interrupt mask (%s): %08x\n",
632 ring->name, I915_READ_IMR(ring)); 710 ring->name, I915_READ_IMR(ring));
@@ -843,6 +921,8 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
843 drm_i915_private_t *dev_priv = dev->dev_private; 921 drm_i915_private_t *dev_priv = dev->dev_private;
844 int ret; 922 int ret;
845 923
924 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
925
846 if (IS_GEN5(dev)) { 926 if (IS_GEN5(dev)) {
847 u16 rgvswctl = I915_READ16(MEMSWCTL); 927 u16 rgvswctl = I915_READ16(MEMSWCTL);
848 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 928 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1321,6 +1401,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1321 return 0; 1401 return 0;
1322 } 1402 }
1323 1403
1404 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1405
1324 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1406 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1325 if (ret) 1407 if (ret)
1326 return ret; 1408 return ret;
@@ -1395,12 +1477,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1395{ 1477{
1396 struct drm_info_node *node = (struct drm_info_node *) m->private; 1478 struct drm_info_node *node = (struct drm_info_node *) m->private;
1397 struct drm_device *dev = node->minor->dev; 1479 struct drm_device *dev = node->minor->dev;
1398 drm_i915_private_t *dev_priv = dev->dev_private; 1480 struct intel_fbdev *ifbdev = NULL;
1399 struct intel_fbdev *ifbdev;
1400 struct intel_framebuffer *fb; 1481 struct intel_framebuffer *fb;
1401 int ret;
1402 1482
1403 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1483#ifdef CONFIG_DRM_I915_FBDEV
1484 struct drm_i915_private *dev_priv = dev->dev_private;
1485 int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1404 if (ret) 1486 if (ret)
1405 return ret; 1487 return ret;
1406 1488
@@ -1416,10 +1498,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1416 describe_obj(m, fb->obj); 1498 describe_obj(m, fb->obj);
1417 seq_putc(m, '\n'); 1499 seq_putc(m, '\n');
1418 mutex_unlock(&dev->mode_config.mutex); 1500 mutex_unlock(&dev->mode_config.mutex);
1501#endif
1419 1502
1420 mutex_lock(&dev->mode_config.fb_lock); 1503 mutex_lock(&dev->mode_config.fb_lock);
1421 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1504 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1422 if (&fb->base == ifbdev->helper.fb) 1505 if (ifbdev && &fb->base == ifbdev->helper.fb)
1423 continue; 1506 continue;
1424 1507
1425 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1508 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
@@ -1442,6 +1525,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
1442 struct drm_device *dev = node->minor->dev; 1525 struct drm_device *dev = node->minor->dev;
1443 drm_i915_private_t *dev_priv = dev->dev_private; 1526 drm_i915_private_t *dev_priv = dev->dev_private;
1444 struct intel_ring_buffer *ring; 1527 struct intel_ring_buffer *ring;
1528 struct i915_hw_context *ctx;
1445 int ret, i; 1529 int ret, i;
1446 1530
1447 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1531 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
@@ -1460,12 +1544,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
1460 seq_putc(m, '\n'); 1544 seq_putc(m, '\n');
1461 } 1545 }
1462 1546
1463 for_each_ring(ring, dev_priv, i) { 1547 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1464 if (ring->default_context) { 1548 seq_puts(m, "HW context ");
1465 seq_printf(m, "HW default context %s ring ", ring->name); 1549 describe_ctx(m, ctx);
1466 describe_obj(m, ring->default_context->obj); 1550 for_each_ring(ring, dev_priv, i)
1467 seq_putc(m, '\n'); 1551 if (ring->default_context == ctx)
1468 } 1552 seq_printf(m, "(default context %s) ", ring->name);
1553
1554 describe_obj(m, ctx->obj);
1555 seq_putc(m, '\n');
1469 } 1556 }
1470 1557
1471 mutex_unlock(&dev->mode_config.mutex); 1558 mutex_unlock(&dev->mode_config.mutex);
@@ -1536,7 +1623,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
1536 I915_READ16(C0DRB3)); 1623 I915_READ16(C0DRB3));
1537 seq_printf(m, "C1DRB3 = 0x%04x\n", 1624 seq_printf(m, "C1DRB3 = 0x%04x\n",
1538 I915_READ16(C1DRB3)); 1625 I915_READ16(C1DRB3));
1539 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 1626 } else if (INTEL_INFO(dev)->gen >= 6) {
1540 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1627 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1541 I915_READ(MAD_DIMM_C0)); 1628 I915_READ(MAD_DIMM_C0));
1542 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1629 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
@@ -1545,8 +1632,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
1545 I915_READ(MAD_DIMM_C2)); 1632 I915_READ(MAD_DIMM_C2));
1546 seq_printf(m, "TILECTL = 0x%08x\n", 1633 seq_printf(m, "TILECTL = 0x%08x\n",
1547 I915_READ(TILECTL)); 1634 I915_READ(TILECTL));
1548 seq_printf(m, "ARB_MODE = 0x%08x\n", 1635 if (IS_GEN8(dev))
1549 I915_READ(ARB_MODE)); 1636 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1637 I915_READ(GAMTARBMODE));
1638 else
1639 seq_printf(m, "ARB_MODE = 0x%08x\n",
1640 I915_READ(ARB_MODE));
1550 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1641 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1551 I915_READ(DISP_ARB_CTL)); 1642 I915_READ(DISP_ARB_CTL));
1552 } 1643 }
@@ -1555,18 +1646,37 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
1555 return 0; 1646 return 0;
1556} 1647}
1557 1648
1558static int i915_ppgtt_info(struct seq_file *m, void *data) 1649static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
1559{ 1650{
1560 struct drm_info_node *node = (struct drm_info_node *) m->private;
1561 struct drm_device *dev = node->minor->dev;
1562 struct drm_i915_private *dev_priv = dev->dev_private; 1651 struct drm_i915_private *dev_priv = dev->dev_private;
1563 struct intel_ring_buffer *ring; 1652 struct intel_ring_buffer *ring;
1564 int i, ret; 1653 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1654 int unused, i;
1565 1655
1656 if (!ppgtt)
1657 return;
1658
1659 seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
1660 seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
1661 for_each_ring(ring, dev_priv, unused) {
1662 seq_printf(m, "%s\n", ring->name);
1663 for (i = 0; i < 4; i++) {
1664 u32 offset = 0x270 + i * 8;
1665 u64 pdp = I915_READ(ring->mmio_base + offset + 4);
1666 pdp <<= 32;
1667 pdp |= I915_READ(ring->mmio_base + offset);
 1668 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
1670 }
1671 }
1672}
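On gen8 each ring exposes its four page-directory pointers as 64-bit register pairs starting at mmio_base + 0x270: the low dword sits at 0x270 + 8*i and the high dword four bytes above it, so the loop assembles pdp as (hi << 32) | lo. For example, PDP0 of a ring with mmio_base 0x2000 would be read from 0x2270/0x2274.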
1673
1674static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
1675{
1676 struct drm_i915_private *dev_priv = dev->dev_private;
1677 struct intel_ring_buffer *ring;
1678 int i;
1566 1679
1567 ret = mutex_lock_interruptible(&dev->struct_mutex);
1568 if (ret)
1569 return ret;
1570 if (INTEL_INFO(dev)->gen == 6) 1680 if (INTEL_INFO(dev)->gen == 6)
1571 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 1681 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
1572 1682
@@ -1585,6 +1695,22 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
1585 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 1695 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
1586 } 1696 }
1587 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 1697 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
1698}
1699
1700static int i915_ppgtt_info(struct seq_file *m, void *data)
1701{
1702 struct drm_info_node *node = (struct drm_info_node *) m->private;
1703 struct drm_device *dev = node->minor->dev;
1704
1705 int ret = mutex_lock_interruptible(&dev->struct_mutex);
1706 if (ret)
1707 return ret;
1708
1709 if (INTEL_INFO(dev)->gen >= 8)
1710 gen8_ppgtt_info(m, dev);
1711 else if (INTEL_INFO(dev)->gen >= 6)
1712 gen6_ppgtt_info(m, dev);
1713
1588 mutex_unlock(&dev->struct_mutex); 1714 mutex_unlock(&dev->struct_mutex);
1589 1715
1590 return 0; 1716 return 0;
@@ -1610,27 +1736,27 @@ static int i915_dpio_info(struct seq_file *m, void *data)
1610 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); 1736 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
1611 1737
1612 seq_printf(m, "DPIO_DIV_A: 0x%08x\n", 1738 seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
1613 vlv_dpio_read(dev_priv, _DPIO_DIV_A)); 1739 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
1614 seq_printf(m, "DPIO_DIV_B: 0x%08x\n", 1740 seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
1615 vlv_dpio_read(dev_priv, _DPIO_DIV_B)); 1741 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
1616 1742
1617 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n", 1743 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
1618 vlv_dpio_read(dev_priv, _DPIO_REFSFR_A)); 1744 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
1619 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n", 1745 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
1620 vlv_dpio_read(dev_priv, _DPIO_REFSFR_B)); 1746 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
1621 1747
1622 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", 1748 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
1623 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A)); 1749 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
1624 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", 1750 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
1625 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); 1751 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
1626 1752
1627 seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n", 1753 seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
1628 vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A)); 1754 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
1629 seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n", 1755 seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
1630 vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B)); 1756 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
1631 1757
1632 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1758 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
1633 vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); 1759 vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
1634 1760
1635 mutex_unlock(&dev_priv->dpio_lock); 1761 mutex_unlock(&dev_priv->dpio_lock);
1636 1762
@@ -1655,126 +1781,20 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
1655 struct drm_info_node *node = m->private; 1781 struct drm_info_node *node = m->private;
1656 struct drm_device *dev = node->minor->dev; 1782 struct drm_device *dev = node->minor->dev;
1657 struct drm_i915_private *dev_priv = dev->dev_private; 1783 struct drm_i915_private *dev_priv = dev->dev_private;
1658 u32 psrstat, psrperf; 1784 u32 psrperf = 0;
1659 1785 bool enabled = false;
1660 if (!IS_HASWELL(dev)) {
1661 seq_puts(m, "PSR not supported on this platform\n");
1662 } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
1663 seq_puts(m, "PSR enabled\n");
1664 } else {
1665 seq_puts(m, "PSR disabled: ");
1666 switch (dev_priv->no_psr_reason) {
1667 case PSR_NO_SOURCE:
1668 seq_puts(m, "not supported on this platform");
1669 break;
1670 case PSR_NO_SINK:
1671 seq_puts(m, "not supported by panel");
1672 break;
1673 case PSR_MODULE_PARAM:
1674 seq_puts(m, "disabled by flag");
1675 break;
1676 case PSR_CRTC_NOT_ACTIVE:
1677 seq_puts(m, "crtc not active");
1678 break;
1679 case PSR_PWR_WELL_ENABLED:
1680 seq_puts(m, "power well enabled");
1681 break;
1682 case PSR_NOT_TILED:
1683 seq_puts(m, "not tiled");
1684 break;
1685 case PSR_SPRITE_ENABLED:
1686 seq_puts(m, "sprite enabled");
1687 break;
1688 case PSR_S3D_ENABLED:
1689 seq_puts(m, "stereo 3d enabled");
1690 break;
1691 case PSR_INTERLACED_ENABLED:
1692 seq_puts(m, "interlaced enabled");
1693 break;
1694 case PSR_HSW_NOT_DDIA:
1695 seq_puts(m, "HSW ties PSR to DDI A (eDP)");
1696 break;
1697 default:
1698 seq_puts(m, "unknown reason");
1699 }
1700 seq_puts(m, "\n");
1701 return 0;
1702 }
1703
1704 psrstat = I915_READ(EDP_PSR_STATUS_CTL);
1705
1706 seq_puts(m, "PSR Current State: ");
1707 switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
1708 case EDP_PSR_STATUS_STATE_IDLE:
1709 seq_puts(m, "Reset state\n");
1710 break;
1711 case EDP_PSR_STATUS_STATE_SRDONACK:
1712 seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
1713 break;
1714 case EDP_PSR_STATUS_STATE_SRDENT:
1715 seq_puts(m, "SRD entry\n");
1716 break;
1717 case EDP_PSR_STATUS_STATE_BUFOFF:
1718 seq_puts(m, "Wait for buffer turn off\n");
1719 break;
1720 case EDP_PSR_STATUS_STATE_BUFON:
1721 seq_puts(m, "Wait for buffer turn on\n");
1722 break;
1723 case EDP_PSR_STATUS_STATE_AUXACK:
1724 seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
1725 break;
1726 case EDP_PSR_STATUS_STATE_SRDOFFACK:
1727 seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
1728 break;
1729 default:
1730 seq_puts(m, "Unknown\n");
1731 break;
1732 }
1733
1734 seq_puts(m, "Link Status: ");
1735 switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
1736 case EDP_PSR_STATUS_LINK_FULL_OFF:
1737 seq_puts(m, "Link is fully off\n");
1738 break;
1739 case EDP_PSR_STATUS_LINK_FULL_ON:
1740 seq_puts(m, "Link is fully on\n");
1741 break;
1742 case EDP_PSR_STATUS_LINK_STANDBY:
1743 seq_puts(m, "Link is in standby\n");
1744 break;
1745 default:
1746 seq_puts(m, "Unknown\n");
1747 break;
1748 }
1749
1750 seq_printf(m, "PSR Entry Count: %u\n",
1751 psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
1752 EDP_PSR_STATUS_COUNT_MASK);
1753
1754 seq_printf(m, "Max Sleep Timer Counter: %u\n",
1755 psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
1756 EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
1757 1786
1758 seq_printf(m, "Had AUX error: %s\n", 1787 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
1759 yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR)); 1788 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
1760 1789
1761 seq_printf(m, "Sending AUX: %s\n", 1790 enabled = HAS_PSR(dev) &&
1762 yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING)); 1791 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1792 seq_printf(m, "Enabled: %s\n", yesno(enabled));
1763 1793
1764 seq_printf(m, "Sending Idle: %s\n", 1794 if (HAS_PSR(dev))
1765 yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE)); 1795 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
1766 1796 EDP_PSR_PERF_CNT_MASK;
1767 seq_printf(m, "Sending TP2 TP3: %s\n", 1797 seq_printf(m, "Performance_Counter: %u\n", psrperf);
1768 yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
1769
1770 seq_printf(m, "Sending TP1: %s\n",
1771 yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
1772
1773 seq_printf(m, "Idle Count: %u\n",
1774 psrstat & EDP_PSR_STATUS_IDLE_MASK);
1775
1776 psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
1777 seq_printf(m, "Performance Counter: %u\n", psrperf);
1778 1798
1779 return 0; 1799 return 0;
1780} 1800}
@@ -1825,6 +1845,751 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
1825 return 0; 1845 return 0;
1826} 1846}
1827 1847
1848struct pipe_crc_info {
1849 const char *name;
1850 struct drm_device *dev;
1851 enum pipe pipe;
1852};
1853
1854static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
1855{
1856 struct pipe_crc_info *info = inode->i_private;
1857 struct drm_i915_private *dev_priv = info->dev->dev_private;
1858 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
1859
1860 spin_lock_irq(&pipe_crc->lock);
1861
1862 if (pipe_crc->opened) {
1863 spin_unlock_irq(&pipe_crc->lock);
1864 return -EBUSY; /* already open */
1865 }
1866
1867 pipe_crc->opened = true;
1868 filep->private_data = inode->i_private;
1869
1870 spin_unlock_irq(&pipe_crc->lock);
1871
1872 return 0;
1873}
1874
1875static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
1876{
1877 struct pipe_crc_info *info = inode->i_private;
1878 struct drm_i915_private *dev_priv = info->dev->dev_private;
1879 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
1880
1881 spin_lock_irq(&pipe_crc->lock);
1882 pipe_crc->opened = false;
1883 spin_unlock_irq(&pipe_crc->lock);
1884
1885 return 0;
1886}
1887
 1888/* (6 fields, 8 chars each, 5 separating spaces + '\n') */
1889#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
 1890/* account for '\0' */
1891#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
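The arithmetic: six 8-character fields plus five separating spaces plus the trailing '\n' gives 6 * 8 + 5 + 1 = 54 bytes per line, and the buffer adds one more byte for the terminating '\0' (55 in total) so snprintf() can NUL-terminate.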
1892
1893static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
1894{
1895 assert_spin_locked(&pipe_crc->lock);
1896 return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
1897 INTEL_PIPE_CRC_ENTRIES_NR);
1898}
1899
1900static ssize_t
1901i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
1902 loff_t *pos)
1903{
1904 struct pipe_crc_info *info = filep->private_data;
1905 struct drm_device *dev = info->dev;
1906 struct drm_i915_private *dev_priv = dev->dev_private;
1907 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
1908 char buf[PIPE_CRC_BUFFER_LEN];
1909 int head, tail, n_entries, n;
1910 ssize_t bytes_read;
1911
1912 /*
1913 * Don't allow user space to provide buffers not big enough to hold
1914 * a line of data.
1915 */
1916 if (count < PIPE_CRC_LINE_LEN)
1917 return -EINVAL;
1918
1919 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
1920 return 0;
1921
1922 /* nothing to read */
1923 spin_lock_irq(&pipe_crc->lock);
1924 while (pipe_crc_data_count(pipe_crc) == 0) {
1925 int ret;
1926
1927 if (filep->f_flags & O_NONBLOCK) {
1928 spin_unlock_irq(&pipe_crc->lock);
1929 return -EAGAIN;
1930 }
1931
1932 ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
1933 pipe_crc_data_count(pipe_crc), pipe_crc->lock);
1934 if (ret) {
1935 spin_unlock_irq(&pipe_crc->lock);
1936 return ret;
1937 }
1938 }
1939
1940 /* We now have one or more entries to read */
1941 head = pipe_crc->head;
1942 tail = pipe_crc->tail;
1943 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
1944 count / PIPE_CRC_LINE_LEN);
1945 spin_unlock_irq(&pipe_crc->lock);
1946
1947 bytes_read = 0;
1948 n = 0;
1949 do {
1950 struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
1951 int ret;
1952
1953 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
1954 "%8u %8x %8x %8x %8x %8x\n",
1955 entry->frame, entry->crc[0],
1956 entry->crc[1], entry->crc[2],
1957 entry->crc[3], entry->crc[4]);
1958
1959 ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
1960 buf, PIPE_CRC_LINE_LEN);
 1961 if (ret) /* copy_to_user() returns the number of bytes left uncopied */
1962 return -EFAULT;
1963
1964 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
1965 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1966 n++;
1967 } while (--n_entries);
1968
1969 spin_lock_irq(&pipe_crc->lock);
1970 pipe_crc->tail = tail;
1971 spin_unlock_irq(&pipe_crc->lock);
1972
1973 return bytes_read;
1974}
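A minimal userspace consumer of this read path might look as follows (a sketch, not part of the patch; the debugfs mount point and DRM minor number are assumptions):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[55]; /* PIPE_CRC_BUFFER_LEN: one line + '\0' */
            ssize_t n;
            int fd = open("/sys/kernel/debug/dri/0/i915_pipe_A_crc", O_RDONLY);

            if (fd < 0)
                    return 1;
            /* Each read() blocks until at least one CRC entry is queued and
             * returns whole lines: "frame crc0 crc1 crc2 crc3 crc4". */
            while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
                    buf[n] = '\0';
                    fputs(buf, stdout);
            }
            close(fd);
            return 0;
    }
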
1975
1976static const struct file_operations i915_pipe_crc_fops = {
1977 .owner = THIS_MODULE,
1978 .open = i915_pipe_crc_open,
1979 .read = i915_pipe_crc_read,
1980 .release = i915_pipe_crc_release,
1981};
1982
1983static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
1984 {
1985 .name = "i915_pipe_A_crc",
1986 .pipe = PIPE_A,
1987 },
1988 {
1989 .name = "i915_pipe_B_crc",
1990 .pipe = PIPE_B,
1991 },
1992 {
1993 .name = "i915_pipe_C_crc",
1994 .pipe = PIPE_C,
1995 },
1996};
1997
1998static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
1999 enum pipe pipe)
2000{
2001 struct drm_device *dev = minor->dev;
2002 struct dentry *ent;
2003 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
2004
2005 info->dev = dev;
2006 ent = debugfs_create_file(info->name, S_IRUGO, root, info,
2007 &i915_pipe_crc_fops);
2008 if (IS_ERR(ent))
2009 return PTR_ERR(ent);
2010
2011 return drm_add_fake_info_node(minor, ent, info);
2012}
2013
2014static const char * const pipe_crc_sources[] = {
2015 "none",
2016 "plane1",
2017 "plane2",
2018 "pf",
2019 "pipe",
2020 "TV",
2021 "DP-B",
2022 "DP-C",
2023 "DP-D",
2024 "auto",
2025};
2026
2027static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
2028{
2029 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
2030 return pipe_crc_sources[source];
2031}
2032
2033static int display_crc_ctl_show(struct seq_file *m, void *data)
2034{
2035 struct drm_device *dev = m->private;
2036 struct drm_i915_private *dev_priv = dev->dev_private;
2037 int i;
2038
2039 for (i = 0; i < I915_MAX_PIPES; i++)
2040 seq_printf(m, "%c %s\n", pipe_name(i),
2041 pipe_crc_source_name(dev_priv->pipe_crc[i].source));
2042
2043 return 0;
2044}
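Reading the control file thus lists each pipe with its active source; on a freshly loaded driver every pipe reports "none", e.g.:

    A none
    B none
    C none
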
2045
2046static int display_crc_ctl_open(struct inode *inode, struct file *file)
2047{
2048 struct drm_device *dev = inode->i_private;
2049
2050 return single_open(file, display_crc_ctl_show, dev);
2051}
2052
2053static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2054 uint32_t *val)
2055{
2056 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2057 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2058
2059 switch (*source) {
2060 case INTEL_PIPE_CRC_SOURCE_PIPE:
2061 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
2062 break;
2063 case INTEL_PIPE_CRC_SOURCE_NONE:
2064 *val = 0;
2065 break;
2066 default:
2067 return -EINVAL;
2068 }
2069
2070 return 0;
2071}
2072
2073static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
2074 enum intel_pipe_crc_source *source)
2075{
2076 struct intel_encoder *encoder;
2077 struct intel_crtc *crtc;
2078 struct intel_digital_port *dig_port;
2079 int ret = 0;
2080
2081 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2082
2083 mutex_lock(&dev->mode_config.mutex);
2084 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2085 base.head) {
2086 if (!encoder->base.crtc)
2087 continue;
2088
2089 crtc = to_intel_crtc(encoder->base.crtc);
2090
2091 if (crtc->pipe != pipe)
2092 continue;
2093
2094 switch (encoder->type) {
2095 case INTEL_OUTPUT_TVOUT:
2096 *source = INTEL_PIPE_CRC_SOURCE_TV;
2097 break;
2098 case INTEL_OUTPUT_DISPLAYPORT:
2099 case INTEL_OUTPUT_EDP:
2100 dig_port = enc_to_dig_port(&encoder->base);
2101 switch (dig_port->port) {
2102 case PORT_B:
2103 *source = INTEL_PIPE_CRC_SOURCE_DP_B;
2104 break;
2105 case PORT_C:
2106 *source = INTEL_PIPE_CRC_SOURCE_DP_C;
2107 break;
2108 case PORT_D:
2109 *source = INTEL_PIPE_CRC_SOURCE_DP_D;
2110 break;
2111 default:
 2112 WARN(1, "nonexistent DP port %c\n",
2113 port_name(dig_port->port));
2114 break;
2115 }
2116 break;
2117 }
2118 }
2119 mutex_unlock(&dev->mode_config.mutex);
2120
2121 return ret;
2122}
2123
2124static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
2125 enum pipe pipe,
2126 enum intel_pipe_crc_source *source,
2127 uint32_t *val)
2128{
2129 struct drm_i915_private *dev_priv = dev->dev_private;
2130 bool need_stable_symbols = false;
2131
2132 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2133 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2134 if (ret)
2135 return ret;
2136 }
2137
2138 switch (*source) {
2139 case INTEL_PIPE_CRC_SOURCE_PIPE:
2140 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
2141 break;
2142 case INTEL_PIPE_CRC_SOURCE_DP_B:
2143 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
2144 need_stable_symbols = true;
2145 break;
2146 case INTEL_PIPE_CRC_SOURCE_DP_C:
2147 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
2148 need_stable_symbols = true;
2149 break;
2150 case INTEL_PIPE_CRC_SOURCE_NONE:
2151 *val = 0;
2152 break;
2153 default:
2154 return -EINVAL;
2155 }
2156
2157 /*
2158 * When the pipe CRC tap point is after the transcoders we need
2159 * to tweak symbol-level features to produce a deterministic series of
2160 * symbols for a given frame. We need to reset those features only once
2161 * a frame (instead of every nth symbol):
2162 * - DC-balance: used to ensure a better clock recovery from the data
2163 * link (SDVO)
2164 * - DisplayPort scrambling: used for EMI reduction
2165 */
2166 if (need_stable_symbols) {
2167 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2168
2171 tmp |= DC_BALANCE_RESET_VLV;
2172 if (pipe == PIPE_A)
2173 tmp |= PIPE_A_SCRAMBLE_RESET;
2174 else
2175 tmp |= PIPE_B_SCRAMBLE_RESET;
2176
2177 I915_WRITE(PORT_DFT2_G4X, tmp);
2178 }
2179
2180 return 0;
2181}
2182
2183static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
2184 enum pipe pipe,
2185 enum intel_pipe_crc_source *source,
2186 uint32_t *val)
2187{
2188 struct drm_i915_private *dev_priv = dev->dev_private;
2189 bool need_stable_symbols = false;
2190
2191 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2192 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2193 if (ret)
2194 return ret;
2195 }
2196
2197 switch (*source) {
2198 case INTEL_PIPE_CRC_SOURCE_PIPE:
2199 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
2200 break;
2201 case INTEL_PIPE_CRC_SOURCE_TV:
2202 if (!SUPPORTS_TV(dev))
2203 return -EINVAL;
2204 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
2205 break;
2206 case INTEL_PIPE_CRC_SOURCE_DP_B:
2207 if (!IS_G4X(dev))
2208 return -EINVAL;
2209 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
2210 need_stable_symbols = true;
2211 break;
2212 case INTEL_PIPE_CRC_SOURCE_DP_C:
2213 if (!IS_G4X(dev))
2214 return -EINVAL;
2215 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
2216 need_stable_symbols = true;
2217 break;
2218 case INTEL_PIPE_CRC_SOURCE_DP_D:
2219 if (!IS_G4X(dev))
2220 return -EINVAL;
2221 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
2222 need_stable_symbols = true;
2223 break;
2224 case INTEL_PIPE_CRC_SOURCE_NONE:
2225 *val = 0;
2226 break;
2227 default:
2228 return -EINVAL;
2229 }
2230
2231 /*
2232 * When the pipe CRC tap point is after the transcoders we need
2233 * to tweak symbol-level features to produce a deterministic series of
2234 * symbols for a given frame. We need to reset those features only once
2235 * a frame (instead of every nth symbol):
2236 * - DC-balance: used to ensure a better clock recovery from the data
2237 * link (SDVO)
2238 * - DisplayPort scrambling: used for EMI reduction
2239 */
2240 if (need_stable_symbols) {
2241 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2242
2243 WARN_ON(!IS_G4X(dev));
2244
2245 I915_WRITE(PORT_DFT_I9XX,
2246 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
2247
2248 if (pipe == PIPE_A)
2249 tmp |= PIPE_A_SCRAMBLE_RESET;
2250 else
2251 tmp |= PIPE_B_SCRAMBLE_RESET;
2252
2253 I915_WRITE(PORT_DFT2_G4X, tmp);
2254 }
2255
2256 return 0;
2257}
2258
2259static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
2260 enum pipe pipe)
2261{
2262 struct drm_i915_private *dev_priv = dev->dev_private;
2263 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2264
2265 if (pipe == PIPE_A)
2266 tmp &= ~PIPE_A_SCRAMBLE_RESET;
2267 else
2268 tmp &= ~PIPE_B_SCRAMBLE_RESET;
2269 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
2270 tmp &= ~DC_BALANCE_RESET_VLV;
2271 I915_WRITE(PORT_DFT2_G4X, tmp);
2272
2273}
2274
2275static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
2276 enum pipe pipe)
2277{
2278 struct drm_i915_private *dev_priv = dev->dev_private;
2279 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2280
2281 if (pipe == PIPE_A)
2282 tmp &= ~PIPE_A_SCRAMBLE_RESET;
2283 else
2284 tmp &= ~PIPE_B_SCRAMBLE_RESET;
2285 I915_WRITE(PORT_DFT2_G4X, tmp);
2286
2287 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
2288 I915_WRITE(PORT_DFT_I9XX,
2289 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
2290 }
2291}
2292
2293static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2294 uint32_t *val)
2295{
2296 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2297 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2298
2299 switch (*source) {
2300 case INTEL_PIPE_CRC_SOURCE_PLANE1:
2301 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
2302 break;
2303 case INTEL_PIPE_CRC_SOURCE_PLANE2:
2304 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
2305 break;
2306 case INTEL_PIPE_CRC_SOURCE_PIPE:
2307 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
2308 break;
2309 case INTEL_PIPE_CRC_SOURCE_NONE:
2310 *val = 0;
2311 break;
2312 default:
2313 return -EINVAL;
2314 }
2315
2316 return 0;
2317}
2318
2319static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2320 uint32_t *val)
2321{
2322 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2323 *source = INTEL_PIPE_CRC_SOURCE_PF;
2324
2325 switch (*source) {
2326 case INTEL_PIPE_CRC_SOURCE_PLANE1:
2327 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
2328 break;
2329 case INTEL_PIPE_CRC_SOURCE_PLANE2:
2330 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
2331 break;
2332 case INTEL_PIPE_CRC_SOURCE_PF:
2333 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
2334 break;
2335 case INTEL_PIPE_CRC_SOURCE_NONE:
2336 *val = 0;
2337 break;
2338 default:
2339 return -EINVAL;
2340 }
2341
2342 return 0;
2343}
2344
2345static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
2346 enum intel_pipe_crc_source source)
2347{
2348 struct drm_i915_private *dev_priv = dev->dev_private;
2349 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
2350 u32 val;
2351 int ret;
2352
2353 if (pipe_crc->source == source)
2354 return 0;
2355
2356 /* forbid changing the source without going back to 'none' */
2357 if (pipe_crc->source && source)
2358 return -EINVAL;
2359
2360 if (IS_GEN2(dev))
2361 ret = i8xx_pipe_crc_ctl_reg(&source, &val);
2362 else if (INTEL_INFO(dev)->gen < 5)
2363 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
2364 else if (IS_VALLEYVIEW(dev))
 2365 ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
2366 else if (IS_GEN5(dev) || IS_GEN6(dev))
2367 ret = ilk_pipe_crc_ctl_reg(&source, &val);
2368 else
2369 ret = ivb_pipe_crc_ctl_reg(&source, &val);
2370
2371 if (ret != 0)
2372 return ret;
2373
2374 /* none -> real source transition */
2375 if (source) {
2376 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
2377 pipe_name(pipe), pipe_crc_source_name(source));
2378
2379 pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
2380 INTEL_PIPE_CRC_ENTRIES_NR,
2381 GFP_KERNEL);
2382 if (!pipe_crc->entries)
2383 return -ENOMEM;
2384
2385 spin_lock_irq(&pipe_crc->lock);
2386 pipe_crc->head = 0;
2387 pipe_crc->tail = 0;
2388 spin_unlock_irq(&pipe_crc->lock);
2389 }
2390
2391 pipe_crc->source = source;
2392
2393 I915_WRITE(PIPE_CRC_CTL(pipe), val);
2394 POSTING_READ(PIPE_CRC_CTL(pipe));
2395
2396 /* real source -> none transition */
2397 if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
2398 struct intel_pipe_crc_entry *entries;
2399
2400 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
2401 pipe_name(pipe));
2402
2403 intel_wait_for_vblank(dev, pipe);
2404
2405 spin_lock_irq(&pipe_crc->lock);
2406 entries = pipe_crc->entries;
2407 pipe_crc->entries = NULL;
2408 spin_unlock_irq(&pipe_crc->lock);
2409
2410 kfree(entries);
2411
2412 if (IS_G4X(dev))
2413 g4x_undo_pipe_scramble_reset(dev, pipe);
2414 else if (IS_VALLEYVIEW(dev))
2415 vlv_undo_pipe_scramble_reset(dev, pipe);
2416 }
2417
2418 return 0;
2419}
2420
2421/*
2422 * Parse pipe CRC command strings:
2423 * command: wsp* object wsp+ name wsp+ source wsp*
2424 * object: 'pipe'
2425 * name: (A | B | C)
2426 * source: (none | plane1 | plane2 | pf)
2427 * wsp: (#0x20 | #0x9 | #0xA)+
2428 *
2429 * eg.:
2430 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
2431 * "pipe A none" -> Stop CRC
2432 */
2433static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
2434{
2435 int n_words = 0;
2436
2437 while (*buf) {
2438 char *end;
2439
2440 /* skip leading white space */
2441 buf = skip_spaces(buf);
2442 if (!*buf)
2443 break; /* end of buffer */
2444
2445 /* find end of word */
2446 for (end = buf; *end && !isspace(*end); end++)
2447 ;
2448
2449 if (n_words == max_words) {
2450 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
2451 max_words);
2452 return -EINVAL; /* ran out of words[] before bytes */
2453 }
2454
2455 if (*end)
2456 *end++ = '\0';
2457 words[n_words++] = buf;
2458 buf = end;
2459 }
2460
2461 return n_words;
2462}
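As a worked example, the buffer "pipe A none\n" tokenizes to n_words = 3 with words = { "pipe", "A", "none" }; the trailing newline is replaced by the terminating '\0' of the third word, so the outer loop exits cleanly afterwards.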
2463
2464enum intel_pipe_crc_object {
2465 PIPE_CRC_OBJECT_PIPE,
2466};
2467
2468static const char * const pipe_crc_objects[] = {
2469 "pipe",
2470};
2471
2472static int
2473display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
2474{
2475 int i;
2476
2477 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
2478 if (!strcmp(buf, pipe_crc_objects[i])) {
2479 *o = i;
2480 return 0;
2481 }
2482
2483 return -EINVAL;
2484}
2485
2486static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
2487{
2488 const char name = buf[0];
2489
2490 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
2491 return -EINVAL;
2492
2493 *pipe = name - 'A';
2494
2495 return 0;
2496}
2497
2498static int
2499display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
2500{
2501 int i;
2502
2503 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
2504 if (!strcmp(buf, pipe_crc_sources[i])) {
2505 *s = i;
2506 return 0;
2507 }
2508
2509 return -EINVAL;
2510}
2511
2512static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
2513{
2514#define N_WORDS 3
2515 int n_words;
2516 char *words[N_WORDS];
2517 enum pipe pipe;
2518 enum intel_pipe_crc_object object;
2519 enum intel_pipe_crc_source source;
2520
2521 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
2522 if (n_words != N_WORDS) {
2523 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
2524 N_WORDS);
2525 return -EINVAL;
2526 }
2527
2528 if (display_crc_ctl_parse_object(words[0], &object) < 0) {
2529 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
2530 return -EINVAL;
2531 }
2532
2533 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
2534 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
2535 return -EINVAL;
2536 }
2537
2538 if (display_crc_ctl_parse_source(words[2], &source) < 0) {
2539 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
2540 return -EINVAL;
2541 }
2542
2543 return pipe_crc_set_source(dev, pipe, source);
2544}
2545
2546static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
2547 size_t len, loff_t *offp)
2548{
2549 struct seq_file *m = file->private_data;
2550 struct drm_device *dev = m->private;
2551 char *tmpbuf;
2552 int ret;
2553
2554 if (len == 0)
2555 return 0;
2556
2557 if (len > PAGE_SIZE - 1) {
2558 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
2559 PAGE_SIZE);
2560 return -E2BIG;
2561 }
2562
2563 tmpbuf = kmalloc(len + 1, GFP_KERNEL);
2564 if (!tmpbuf)
2565 return -ENOMEM;
2566
2567 if (copy_from_user(tmpbuf, ubuf, len)) {
2568 ret = -EFAULT;
2569 goto out;
2570 }
2571 tmpbuf[len] = '\0';
2572
2573 ret = display_crc_ctl_parse(dev, tmpbuf, len);
2574
2575out:
2576 kfree(tmpbuf);
2577 if (ret < 0)
2578 return ret;
2579
2580 *offp += len;
2581 return len;
2582}
2583
2584static const struct file_operations i915_display_crc_ctl_fops = {
2585 .owner = THIS_MODULE,
2586 .open = display_crc_ctl_open,
2587 .read = seq_read,
2588 .llseek = seq_lseek,
2589 .release = single_release,
2590 .write = display_crc_ctl_write
2591};
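In use, one command is submitted per write (assuming the usual debugfs mount point): echo "pipe A pipe" > /sys/kernel/debug/dri/0/i915_display_crc_ctl starts collection from pipe A's pipe tap point, and echo "pipe A none" stops it again.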
2592
1828static int 2593static int
1829i915_wedged_get(void *data, u64 *val) 2594i915_wedged_get(void *data, u64 *val)
1830{ 2595{
@@ -1885,6 +2650,72 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
1885 i915_ring_stop_get, i915_ring_stop_set, 2650 i915_ring_stop_get, i915_ring_stop_set,
1886 "0x%08llx\n"); 2651 "0x%08llx\n");
1887 2652
2653static int
2654i915_ring_missed_irq_get(void *data, u64 *val)
2655{
2656 struct drm_device *dev = data;
2657 struct drm_i915_private *dev_priv = dev->dev_private;
2658
2659 *val = dev_priv->gpu_error.missed_irq_rings;
2660 return 0;
2661}
2662
2663static int
2664i915_ring_missed_irq_set(void *data, u64 val)
2665{
2666 struct drm_device *dev = data;
2667 struct drm_i915_private *dev_priv = dev->dev_private;
2668 int ret;
2669
2670 /* Lock against concurrent debugfs callers */
2671 ret = mutex_lock_interruptible(&dev->struct_mutex);
2672 if (ret)
2673 return ret;
2674 dev_priv->gpu_error.missed_irq_rings = val;
2675 mutex_unlock(&dev->struct_mutex);
2676
2677 return 0;
2678}
2679
2680DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
2681 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
2682 "0x%08llx\n");
2683
2684static int
2685i915_ring_test_irq_get(void *data, u64 *val)
2686{
2687 struct drm_device *dev = data;
2688 struct drm_i915_private *dev_priv = dev->dev_private;
2689
2690 *val = dev_priv->gpu_error.test_irq_rings;
2691
2692 return 0;
2693}
2694
2695static int
2696i915_ring_test_irq_set(void *data, u64 val)
2697{
2698 struct drm_device *dev = data;
2699 struct drm_i915_private *dev_priv = dev->dev_private;
2700 int ret;
2701
2702 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
2703
2704 /* Lock against concurrent debugfs callers */
2705 ret = mutex_lock_interruptible(&dev->struct_mutex);
2706 if (ret)
2707 return ret;
2708
2709 dev_priv->gpu_error.test_irq_rings = val;
2710 mutex_unlock(&dev->struct_mutex);
2711
2712 return 0;
2713}
2714
2715DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
2716 i915_ring_test_irq_get, i915_ring_test_irq_set,
2717 "0x%08llx\n");
2718
1888#define DROP_UNBOUND 0x1 2719#define DROP_UNBOUND 0x1
1889#define DROP_BOUND 0x2 2720#define DROP_BOUND 0x2
1890#define DROP_RETIRE 0x4 2721#define DROP_RETIRE 0x4
@@ -1972,6 +2803,8 @@ i915_max_freq_get(void *data, u64 *val)
1972 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2803 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1973 return -ENODEV; 2804 return -ENODEV;
1974 2805
2806 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
2807
1975 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2808 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1976 if (ret) 2809 if (ret)
1977 return ret; 2810 return ret;
@@ -1996,6 +2829,8 @@ i915_max_freq_set(void *data, u64 val)
1996 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2829 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1997 return -ENODEV; 2830 return -ENODEV;
1998 2831
2832 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
2833
1999 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 2834 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
2000 2835
2001 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2836 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2034,6 +2869,8 @@ i915_min_freq_get(void *data, u64 *val)
2034 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2869 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2035 return -ENODEV; 2870 return -ENODEV;
2036 2871
2872 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
2873
2037 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2874 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
2038 if (ret) 2875 if (ret)
2039 return ret; 2876 return ret;
@@ -2058,6 +2895,8 @@ i915_min_freq_set(void *data, u64 val)
2058 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2895 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2059 return -ENODEV; 2896 return -ENODEV;
2060 2897
2898 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
2899
2061 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 2900 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
2062 2901
2063 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2902 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2136,32 +2975,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
2136 i915_cache_sharing_get, i915_cache_sharing_set, 2975 i915_cache_sharing_get, i915_cache_sharing_set,
2137 "%llu\n"); 2976 "%llu\n");
2138 2977
2139/* As the drm_debugfs_init() routines are called before dev->dev_private is
2140 * allocated we need to hook into the minor for release. */
2141static int
2142drm_add_fake_info_node(struct drm_minor *minor,
2143 struct dentry *ent,
2144 const void *key)
2145{
2146 struct drm_info_node *node;
2147
2148 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
2149 if (node == NULL) {
2150 debugfs_remove(ent);
2151 return -ENOMEM;
2152 }
2153
2154 node->minor = minor;
2155 node->dent = ent;
2156 node->info_ent = (void *) key;
2157
2158 mutex_lock(&minor->debugfs_lock);
2159 list_add(&node->list, &minor->debugfs_list);
2160 mutex_unlock(&minor->debugfs_lock);
2161
2162 return 0;
2163}
2164
2165static int i915_forcewake_open(struct inode *inode, struct file *file) 2978static int i915_forcewake_open(struct inode *inode, struct file *file)
2166{ 2979{
2167 struct drm_device *dev = inode->i_private; 2980 struct drm_device *dev = inode->i_private;
@@ -2227,7 +3040,7 @@ static int i915_debugfs_create(struct dentry *root,
2227 return drm_add_fake_info_node(minor, ent, fops); 3040 return drm_add_fake_info_node(minor, ent, fops);
2228} 3041}
2229 3042
2230static struct drm_info_list i915_debugfs_list[] = { 3043static const struct drm_info_list i915_debugfs_list[] = {
2231 {"i915_capabilities", i915_capabilities, 0}, 3044 {"i915_capabilities", i915_capabilities, 0},
2232 {"i915_gem_objects", i915_gem_object_info, 0}, 3045 {"i915_gem_objects", i915_gem_object_info, 0},
2233 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 3046 {"i915_gem_gtt", i915_gem_gtt_info, 0},
@@ -2269,7 +3082,7 @@ static struct drm_info_list i915_debugfs_list[] = {
2269}; 3082};
2270#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 3083#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2271 3084
2272static struct i915_debugfs_files { 3085static const struct i915_debugfs_files {
2273 const char *name; 3086 const char *name;
2274 const struct file_operations *fops; 3087 const struct file_operations *fops;
2275} i915_debugfs_files[] = { 3088} i915_debugfs_files[] = {
@@ -2278,11 +3091,28 @@ static struct i915_debugfs_files {
2278 {"i915_min_freq", &i915_min_freq_fops}, 3091 {"i915_min_freq", &i915_min_freq_fops},
2279 {"i915_cache_sharing", &i915_cache_sharing_fops}, 3092 {"i915_cache_sharing", &i915_cache_sharing_fops},
2280 {"i915_ring_stop", &i915_ring_stop_fops}, 3093 {"i915_ring_stop", &i915_ring_stop_fops},
3094 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
3095 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
2281 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 3096 {"i915_gem_drop_caches", &i915_drop_caches_fops},
2282 {"i915_error_state", &i915_error_state_fops}, 3097 {"i915_error_state", &i915_error_state_fops},
2283 {"i915_next_seqno", &i915_next_seqno_fops}, 3098 {"i915_next_seqno", &i915_next_seqno_fops},
3099 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
2284}; 3100};
2285 3101
3102void intel_display_crc_init(struct drm_device *dev)
3103{
3104 struct drm_i915_private *dev_priv = dev->dev_private;
3105 int i;
3106
3107 for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
3108 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
3109
3110 pipe_crc->opened = false;
3111 spin_lock_init(&pipe_crc->lock);
3112 init_waitqueue_head(&pipe_crc->wq);
3113 }
3114}
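intel_display_crc_init() is called from i915_driver_load() (see the i915_dma.c hunk below), so each pipe's lock and waitqueue are initialized before i915_debugfs_init() exposes the per-pipe CRC files to userspace.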
3115
2286int i915_debugfs_init(struct drm_minor *minor) 3116int i915_debugfs_init(struct drm_minor *minor)
2287{ 3117{
2288 int ret, i; 3118 int ret, i;
@@ -2291,6 +3121,12 @@ int i915_debugfs_init(struct drm_minor *minor)
2291 if (ret) 3121 if (ret)
2292 return ret; 3122 return ret;
2293 3123
3124 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
3125 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
3126 if (ret)
3127 return ret;
3128 }
3129
2294 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 3130 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2295 ret = i915_debugfs_create(minor->debugfs_root, minor, 3131 ret = i915_debugfs_create(minor->debugfs_root, minor,
2296 i915_debugfs_files[i].name, 3132 i915_debugfs_files[i].name,
@@ -2310,8 +3146,17 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
2310 3146
2311 drm_debugfs_remove_files(i915_debugfs_list, 3147 drm_debugfs_remove_files(i915_debugfs_list,
2312 I915_DEBUGFS_ENTRIES, minor); 3148 I915_DEBUGFS_ENTRIES, minor);
3149
2313 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 3150 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
2314 1, minor); 3151 1, minor);
3152
3153 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
3154 struct drm_info_list *info_list =
3155 (struct drm_info_list *)&i915_pipe_crc_data[i];
3156
3157 drm_debugfs_remove_files(info_list, 1, minor);
3158 }
3159
2315 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 3160 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2316 struct drm_info_list *info_list = 3161 struct drm_info_list *info_list =
2317 (struct drm_info_list *) i915_debugfs_files[i].fops; 3162 (struct drm_info_list *) i915_debugfs_files[i].fops;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d5c784d48671..0cab2d045135 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -52,7 +52,7 @@
52 intel_ring_emit(LP_RING(dev_priv), x) 52 intel_ring_emit(LP_RING(dev_priv), x)
53 53
54#define ADVANCE_LP_RING() \ 54#define ADVANCE_LP_RING() \
55 intel_ring_advance(LP_RING(dev_priv)) 55 __intel_ring_advance(LP_RING(dev_priv))
56 56
57/** 57/**
58 * Lock test for when it's just for synchronization of ring access. 58 * Lock test for when it's just for synchronization of ring access.
@@ -641,7 +641,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
641 641
642 if (batch->num_cliprects) { 642 if (batch->num_cliprects) {
643 cliprects = kcalloc(batch->num_cliprects, 643 cliprects = kcalloc(batch->num_cliprects,
644 sizeof(struct drm_clip_rect), 644 sizeof(*cliprects),
645 GFP_KERNEL); 645 GFP_KERNEL);
646 if (cliprects == NULL) 646 if (cliprects == NULL)
647 return -ENOMEM; 647 return -ENOMEM;
@@ -703,7 +703,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
703 703
704 if (cmdbuf->num_cliprects) { 704 if (cmdbuf->num_cliprects) {
705 cliprects = kcalloc(cmdbuf->num_cliprects, 705 cliprects = kcalloc(cmdbuf->num_cliprects,
706 sizeof(struct drm_clip_rect), GFP_KERNEL); 706 sizeof(*cliprects), GFP_KERNEL);
707 if (cliprects == NULL) { 707 if (cliprects == NULL) {
708 ret = -ENOMEM; 708 ret = -ENOMEM;
709 goto fail_batch_free; 709 goto fail_batch_free;
@@ -931,7 +931,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
931 value = READ_BREADCRUMB(dev_priv); 931 value = READ_BREADCRUMB(dev_priv);
932 break; 932 break;
933 case I915_PARAM_CHIPSET_ID: 933 case I915_PARAM_CHIPSET_ID:
934 value = dev->pci_device; 934 value = dev->pdev->device;
935 break; 935 break;
936 case I915_PARAM_HAS_GEM: 936 case I915_PARAM_HAS_GEM:
937 value = 1; 937 value = 1;
@@ -1311,13 +1311,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
1311 if (ret) 1311 if (ret)
1312 goto cleanup_gem_stolen; 1312 goto cleanup_gem_stolen;
1313 1313
1314 intel_power_domains_init_hw(dev);
1315
1314 /* Important: The output setup functions called by modeset_init need 1316 /* Important: The output setup functions called by modeset_init need
1315 * working irqs for e.g. gmbus and dp aux transfers. */ 1317 * working irqs for e.g. gmbus and dp aux transfers. */
1316 intel_modeset_init(dev); 1318 intel_modeset_init(dev);
1317 1319
1318 ret = i915_gem_init(dev); 1320 ret = i915_gem_init(dev);
1319 if (ret) 1321 if (ret)
1320 goto cleanup_irq; 1322 goto cleanup_power;
1321 1323
1322 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); 1324 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
1323 1325
@@ -1325,9 +1327,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
1325 1327
1326 /* Always safe in the mode setting case. */ 1328 /* Always safe in the mode setting case. */
1327 /* FIXME: do pre/post-mode set stuff in core KMS code */ 1329 /* FIXME: do pre/post-mode set stuff in core KMS code */
1328 dev->vblank_disable_allowed = 1; 1330 dev->vblank_disable_allowed = true;
1329 if (INTEL_INFO(dev)->num_pipes == 0) 1331 if (INTEL_INFO(dev)->num_pipes == 0) {
1332 intel_display_power_put(dev, POWER_DOMAIN_VGA);
1330 return 0; 1333 return 0;
1334 }
1331 1335
1332 ret = intel_fbdev_init(dev); 1336 ret = intel_fbdev_init(dev);
1333 if (ret) 1337 if (ret)
@@ -1362,7 +1366,8 @@ cleanup_gem:
1362 mutex_unlock(&dev->struct_mutex); 1366 mutex_unlock(&dev->struct_mutex);
1363 i915_gem_cleanup_aliasing_ppgtt(dev); 1367 i915_gem_cleanup_aliasing_ppgtt(dev);
1364 drm_mm_takedown(&dev_priv->gtt.base.mm); 1368 drm_mm_takedown(&dev_priv->gtt.base.mm);
1365cleanup_irq: 1369cleanup_power:
1370 intel_display_power_put(dev, POWER_DOMAIN_VGA);
1366 drm_irq_uninstall(dev); 1371 drm_irq_uninstall(dev);
1367cleanup_gem_stolen: 1372cleanup_gem_stolen:
1368 i915_gem_cleanup_stolen(dev); 1373 i915_gem_cleanup_stolen(dev);
@@ -1398,6 +1403,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1398 master->driver_priv = NULL; 1403 master->driver_priv = NULL;
1399} 1404}
1400 1405
1406#ifdef CONFIG_DRM_I915_FBDEV
1401static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) 1407static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1402{ 1408{
1403 struct apertures_struct *ap; 1409 struct apertures_struct *ap;
@@ -1418,6 +1424,11 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1418 1424
1419 kfree(ap); 1425 kfree(ap);
1420} 1426}
1427#else
1428static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1429{
1430}
1431#endif
1421 1432
1422static void i915_dump_device_info(struct drm_i915_private *dev_priv) 1433static void i915_dump_device_info(struct drm_i915_private *dev_priv)
1423{ 1434{
@@ -1459,17 +1470,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1459 info = (struct intel_device_info *) flags; 1470 info = (struct intel_device_info *) flags;
1460 1471
1461 /* Refuse to load on gen6+ without kms enabled. */ 1472 /* Refuse to load on gen6+ without kms enabled. */
1462 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) 1473 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
1474 DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
1475 DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
1463 return -ENODEV; 1476 return -ENODEV;
1477 }
1464 1478
1465 /* i915 has 4 more counters */ 1479 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1466 dev->counters += 4;
1467 dev->types[6] = _DRM_STAT_IRQ;
1468 dev->types[7] = _DRM_STAT_PRIMARY;
1469 dev->types[8] = _DRM_STAT_SECONDARY;
1470 dev->types[9] = _DRM_STAT_DMA;
1471
1472 dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
1473 if (dev_priv == NULL) 1480 if (dev_priv == NULL)
1474 return -ENOMEM; 1481 return -ENOMEM;
1475 1482
@@ -1494,6 +1501,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1494 dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */ 1501 dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
1495 INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); 1502 INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
1496 1503
1504 intel_display_crc_init(dev);
1505
1497 i915_dump_device_info(dev_priv); 1506 i915_dump_device_info(dev_priv);
1498 1507
1499 /* Not all pre-production machines fall into this category, only the 1508 /* Not all pre-production machines fall into this category, only the
@@ -1531,19 +1540,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1531 1540
1532 intel_uncore_early_sanitize(dev); 1541 intel_uncore_early_sanitize(dev);
1533 1542
1534 if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) { 1543 /* This must be called before any calls to HAS_PCH_* */
1535 /* The docs do not explain exactly how the calculation can be 1544 intel_detect_pch(dev);
1536 * made. It is somewhat guessable, but for now, it's always 1545
1537 * 128MB. 1546 intel_uncore_init(dev);
1538 * NB: We can't write IDICR yet because we do not have gt funcs
1539 * set up */
1540 dev_priv->ellc_size = 128;
1541 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
1542 }
1543 1547
1544 ret = i915_gem_gtt_init(dev); 1548 ret = i915_gem_gtt_init(dev);
1545 if (ret) 1549 if (ret)
1546 goto put_bridge; 1550 goto out_regs;
1547 1551
1548 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1552 if (drm_core_check_feature(dev, DRIVER_MODESET))
1549 i915_kick_out_firmware_fb(dev_priv); 1553 i915_kick_out_firmware_fb(dev_priv);
@@ -1572,7 +1576,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1572 aperture_size); 1576 aperture_size);
1573 if (dev_priv->gtt.mappable == NULL) { 1577 if (dev_priv->gtt.mappable == NULL) {
1574 ret = -EIO; 1578 ret = -EIO;
1575 goto out_rmmap; 1579 goto out_gtt;
1576 } 1580 }
1577 1581
1578 dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, 1582 dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
@@ -1598,13 +1602,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1598 goto out_mtrrfree; 1602 goto out_mtrrfree;
1599 } 1603 }
1600 1604
1601 /* This must be called before any calls to HAS_PCH_* */
1602 intel_detect_pch(dev);
1603
1604 intel_irq_init(dev); 1605 intel_irq_init(dev);
1605 intel_pm_init(dev); 1606 intel_pm_init(dev);
1606 intel_uncore_sanitize(dev); 1607 intel_uncore_sanitize(dev);
1607 intel_uncore_init(dev);
1608 1608
1609 /* Try to make sure MCHBAR is enabled before poking at it */ 1609 /* Try to make sure MCHBAR is enabled before poking at it */
1610 intel_setup_mchbar(dev); 1610 intel_setup_mchbar(dev);
@@ -1640,13 +1640,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1640 } 1640 }
1641 1641
1642 if (HAS_POWER_WELL(dev)) 1642 if (HAS_POWER_WELL(dev))
1643 i915_init_power_well(dev); 1643 intel_power_domains_init(dev);
1644 1644
1645 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1645 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1646 ret = i915_load_modeset_init(dev); 1646 ret = i915_load_modeset_init(dev);
1647 if (ret < 0) { 1647 if (ret < 0) {
1648 DRM_ERROR("failed to init modeset\n"); 1648 DRM_ERROR("failed to init modeset\n");
1649 goto out_gem_unload; 1649 goto out_power_well;
1650 } 1650 }
1651 } else { 1651 } else {
1652 /* Start out suspended in ums mode. */ 1652 /* Start out suspended in ums mode. */
@@ -1666,6 +1666,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1666 1666
1667 return 0; 1667 return 0;
1668 1668
1669out_power_well:
1670 if (HAS_POWER_WELL(dev))
1671 intel_power_domains_remove(dev);
1672 drm_vblank_cleanup(dev);
1669out_gem_unload: 1673out_gem_unload:
1670 if (dev_priv->mm.inactive_shrinker.scan_objects) 1674 if (dev_priv->mm.inactive_shrinker.scan_objects)
1671 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 1675 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
@@ -1679,12 +1683,18 @@ out_gem_unload:
1679out_mtrrfree: 1683out_mtrrfree:
1680 arch_phys_wc_del(dev_priv->gtt.mtrr); 1684 arch_phys_wc_del(dev_priv->gtt.mtrr);
1681 io_mapping_free(dev_priv->gtt.mappable); 1685 io_mapping_free(dev_priv->gtt.mappable);
1686out_gtt:
1687 list_del(&dev_priv->gtt.base.global_link);
1688 drm_mm_takedown(&dev_priv->gtt.base.mm);
1682 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); 1689 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1683out_rmmap: 1690out_regs:
1691 intel_uncore_fini(dev);
1684 pci_iounmap(dev->pdev, dev_priv->regs); 1692 pci_iounmap(dev->pdev, dev_priv->regs);
1685put_bridge: 1693put_bridge:
1686 pci_dev_put(dev_priv->bridge_dev); 1694 pci_dev_put(dev_priv->bridge_dev);
1687free_priv: 1695free_priv:
1696 if (dev_priv->slab)
1697 kmem_cache_destroy(dev_priv->slab);
1688 kfree(dev_priv); 1698 kfree(dev_priv);
1689 return ret; 1699 return ret;
1690} 1700}
@@ -1700,8 +1710,8 @@ int i915_driver_unload(struct drm_device *dev)
1700 /* The i915.ko module is still not prepared to be loaded when 1710 /* The i915.ko module is still not prepared to be loaded when
1701 * the power well is not enabled, so just enable it in case 1711 * the power well is not enabled, so just enable it in case
1702 * we're going to unload/reload. */ 1712 * we're going to unload/reload. */
1703 intel_set_power_well(dev, true); 1713 intel_display_set_init_power(dev, true);
1704 i915_remove_power_well(dev); 1714 intel_power_domains_remove(dev);
1705 } 1715 }
1706 1716
1707 i915_teardown_sysfs(dev); 1717 i915_teardown_sysfs(dev);
@@ -1709,15 +1719,9 @@ int i915_driver_unload(struct drm_device *dev)
1709 if (dev_priv->mm.inactive_shrinker.scan_objects) 1719 if (dev_priv->mm.inactive_shrinker.scan_objects)
1710 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 1720 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
1711 1721
1712 mutex_lock(&dev->struct_mutex); 1722 ret = i915_gem_suspend(dev);
1713 ret = i915_gpu_idle(dev);
1714 if (ret) 1723 if (ret)
1715 DRM_ERROR("failed to idle hardware: %d\n", ret); 1724 DRM_ERROR("failed to idle hardware: %d\n", ret);
1716 i915_gem_retire_requests(dev);
1717 mutex_unlock(&dev->struct_mutex);
1718
1719 /* Cancel the retire work handler, which should be idle now. */
1720 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
1721 1725
1722 io_mapping_free(dev_priv->gtt.mappable); 1726 io_mapping_free(dev_priv->gtt.mappable);
1723 arch_phys_wc_del(dev_priv->gtt.mtrr); 1727 arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1774,8 +1778,8 @@ int i915_driver_unload(struct drm_device *dev)
1774 list_del(&dev_priv->gtt.base.global_link); 1778 list_del(&dev_priv->gtt.base.global_link);
1775 WARN_ON(!list_empty(&dev_priv->vm_list)); 1779 WARN_ON(!list_empty(&dev_priv->vm_list));
1776 drm_mm_takedown(&dev_priv->gtt.base.mm); 1780 drm_mm_takedown(&dev_priv->gtt.base.mm);
1777 if (dev_priv->regs != NULL) 1781
1778 pci_iounmap(dev->pdev, dev_priv->regs); 1782 drm_vblank_cleanup(dev);
1779 1783
1780 intel_teardown_gmbus(dev); 1784 intel_teardown_gmbus(dev);
1781 intel_teardown_mchbar(dev); 1785 intel_teardown_mchbar(dev);
@@ -1785,6 +1789,10 @@ int i915_driver_unload(struct drm_device *dev)
1785 1789
1786 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); 1790 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1787 1791
1792 intel_uncore_fini(dev);
1793 if (dev_priv->regs != NULL)
1794 pci_iounmap(dev->pdev, dev_priv->regs);
1795
1788 if (dev_priv->slab) 1796 if (dev_priv->slab)
1789 kmem_cache_destroy(dev_priv->slab); 1797 kmem_cache_destroy(dev_priv->slab);
1790 1798
@@ -1796,19 +1804,11 @@ int i915_driver_unload(struct drm_device *dev)
1796 1804
1797int i915_driver_open(struct drm_device *dev, struct drm_file *file) 1805int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1798{ 1806{
1799 struct drm_i915_file_private *file_priv; 1807 int ret;
1800
1801 DRM_DEBUG_DRIVER("\n");
1802 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1803 if (!file_priv)
1804 return -ENOMEM;
1805
1806 file->driver_priv = file_priv;
1807
1808 spin_lock_init(&file_priv->mm.lock);
1809 INIT_LIST_HEAD(&file_priv->mm.request_list);
1810 1808
1811 idr_init(&file_priv->context_idr); 1809 ret = i915_gem_open(dev, file);
1810 if (ret)
1811 return ret;
1812 1812
1813 return 0; 1813 return 0;
1814} 1814}
@@ -1836,7 +1836,7 @@ void i915_driver_lastclose(struct drm_device * dev)
1836 return; 1836 return;
1837 1837
1838 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1838 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1839 intel_fb_restore_mode(dev); 1839 intel_fbdev_restore_mode(dev);
1840 vga_switcheroo_process_delayed_switch(); 1840 vga_switcheroo_process_delayed_switch();
1841 return; 1841 return;
1842 } 1842 }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2ad27880cd04..989be12cdd6e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -160,49 +160,58 @@ extern int intel_agp_enabled;
160static const struct intel_device_info intel_i830_info = { 160static const struct intel_device_info intel_i830_info = {
161 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, 161 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
162 .has_overlay = 1, .overlay_needs_physical = 1, 162 .has_overlay = 1, .overlay_needs_physical = 1,
163 .ring_mask = RENDER_RING,
163}; 164};
164 165
165static const struct intel_device_info intel_845g_info = { 166static const struct intel_device_info intel_845g_info = {
166 .gen = 2, .num_pipes = 1, 167 .gen = 2, .num_pipes = 1,
167 .has_overlay = 1, .overlay_needs_physical = 1, 168 .has_overlay = 1, .overlay_needs_physical = 1,
169 .ring_mask = RENDER_RING,
168}; 170};
169 171
170static const struct intel_device_info intel_i85x_info = { 172static const struct intel_device_info intel_i85x_info = {
171 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2, 173 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
172 .cursor_needs_physical = 1, 174 .cursor_needs_physical = 1,
173 .has_overlay = 1, .overlay_needs_physical = 1, 175 .has_overlay = 1, .overlay_needs_physical = 1,
176 .ring_mask = RENDER_RING,
174}; 177};
175 178
176static const struct intel_device_info intel_i865g_info = { 179static const struct intel_device_info intel_i865g_info = {
177 .gen = 2, .num_pipes = 1, 180 .gen = 2, .num_pipes = 1,
178 .has_overlay = 1, .overlay_needs_physical = 1, 181 .has_overlay = 1, .overlay_needs_physical = 1,
182 .ring_mask = RENDER_RING,
179}; 183};
180 184
181static const struct intel_device_info intel_i915g_info = { 185static const struct intel_device_info intel_i915g_info = {
182 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, 186 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
183 .has_overlay = 1, .overlay_needs_physical = 1, 187 .has_overlay = 1, .overlay_needs_physical = 1,
188 .ring_mask = RENDER_RING,
184}; 189};
185static const struct intel_device_info intel_i915gm_info = { 190static const struct intel_device_info intel_i915gm_info = {
186 .gen = 3, .is_mobile = 1, .num_pipes = 2, 191 .gen = 3, .is_mobile = 1, .num_pipes = 2,
187 .cursor_needs_physical = 1, 192 .cursor_needs_physical = 1,
188 .has_overlay = 1, .overlay_needs_physical = 1, 193 .has_overlay = 1, .overlay_needs_physical = 1,
189 .supports_tv = 1, 194 .supports_tv = 1,
195 .ring_mask = RENDER_RING,
190}; 196};
191static const struct intel_device_info intel_i945g_info = { 197static const struct intel_device_info intel_i945g_info = {
192 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, 198 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
193 .has_overlay = 1, .overlay_needs_physical = 1, 199 .has_overlay = 1, .overlay_needs_physical = 1,
200 .ring_mask = RENDER_RING,
194}; 201};
195static const struct intel_device_info intel_i945gm_info = { 202static const struct intel_device_info intel_i945gm_info = {
196 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, 203 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
197 .has_hotplug = 1, .cursor_needs_physical = 1, 204 .has_hotplug = 1, .cursor_needs_physical = 1,
198 .has_overlay = 1, .overlay_needs_physical = 1, 205 .has_overlay = 1, .overlay_needs_physical = 1,
199 .supports_tv = 1, 206 .supports_tv = 1,
207 .ring_mask = RENDER_RING,
200}; 208};
201 209
202static const struct intel_device_info intel_i965g_info = { 210static const struct intel_device_info intel_i965g_info = {
203 .gen = 4, .is_broadwater = 1, .num_pipes = 2, 211 .gen = 4, .is_broadwater = 1, .num_pipes = 2,
204 .has_hotplug = 1, 212 .has_hotplug = 1,
205 .has_overlay = 1, 213 .has_overlay = 1,
214 .ring_mask = RENDER_RING,
206}; 215};
207 216
208static const struct intel_device_info intel_i965gm_info = { 217static const struct intel_device_info intel_i965gm_info = {
@@ -210,18 +219,20 @@ static const struct intel_device_info intel_i965gm_info = {
210 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, 219 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
211 .has_overlay = 1, 220 .has_overlay = 1,
212 .supports_tv = 1, 221 .supports_tv = 1,
222 .ring_mask = RENDER_RING,
213}; 223};
214 224
215static const struct intel_device_info intel_g33_info = { 225static const struct intel_device_info intel_g33_info = {
216 .gen = 3, .is_g33 = 1, .num_pipes = 2, 226 .gen = 3, .is_g33 = 1, .num_pipes = 2,
217 .need_gfx_hws = 1, .has_hotplug = 1, 227 .need_gfx_hws = 1, .has_hotplug = 1,
218 .has_overlay = 1, 228 .has_overlay = 1,
229 .ring_mask = RENDER_RING,
219}; 230};
220 231
221static const struct intel_device_info intel_g45_info = { 232static const struct intel_device_info intel_g45_info = {
222 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, 233 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
223 .has_pipe_cxsr = 1, .has_hotplug = 1, 234 .has_pipe_cxsr = 1, .has_hotplug = 1,
224 .has_bsd_ring = 1, 235 .ring_mask = RENDER_RING | BSD_RING,
225}; 236};
226 237
227static const struct intel_device_info intel_gm45_info = { 238static const struct intel_device_info intel_gm45_info = {
@@ -229,7 +240,7 @@ static const struct intel_device_info intel_gm45_info = {
229 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, 240 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
230 .has_pipe_cxsr = 1, .has_hotplug = 1, 241 .has_pipe_cxsr = 1, .has_hotplug = 1,
231 .supports_tv = 1, 242 .supports_tv = 1,
232 .has_bsd_ring = 1, 243 .ring_mask = RENDER_RING | BSD_RING,
233}; 244};
234 245
235static const struct intel_device_info intel_pineview_info = { 246static const struct intel_device_info intel_pineview_info = {
@@ -241,42 +252,36 @@ static const struct intel_device_info intel_pineview_info = {
241static const struct intel_device_info intel_ironlake_d_info = { 252static const struct intel_device_info intel_ironlake_d_info = {
242 .gen = 5, .num_pipes = 2, 253 .gen = 5, .num_pipes = 2,
243 .need_gfx_hws = 1, .has_hotplug = 1, 254 .need_gfx_hws = 1, .has_hotplug = 1,
244 .has_bsd_ring = 1, 255 .ring_mask = RENDER_RING | BSD_RING,
245}; 256};
246 257
247static const struct intel_device_info intel_ironlake_m_info = { 258static const struct intel_device_info intel_ironlake_m_info = {
248 .gen = 5, .is_mobile = 1, .num_pipes = 2, 259 .gen = 5, .is_mobile = 1, .num_pipes = 2,
249 .need_gfx_hws = 1, .has_hotplug = 1, 260 .need_gfx_hws = 1, .has_hotplug = 1,
250 .has_fbc = 1, 261 .has_fbc = 1,
251 .has_bsd_ring = 1, 262 .ring_mask = RENDER_RING | BSD_RING,
252}; 263};
253 264
254static const struct intel_device_info intel_sandybridge_d_info = { 265static const struct intel_device_info intel_sandybridge_d_info = {
255 .gen = 6, .num_pipes = 2, 266 .gen = 6, .num_pipes = 2,
256 .need_gfx_hws = 1, .has_hotplug = 1, 267 .need_gfx_hws = 1, .has_hotplug = 1,
257 .has_bsd_ring = 1, 268 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
258 .has_blt_ring = 1,
259 .has_llc = 1, 269 .has_llc = 1,
260 .has_force_wake = 1,
261}; 270};
262 271
263static const struct intel_device_info intel_sandybridge_m_info = { 272static const struct intel_device_info intel_sandybridge_m_info = {
264 .gen = 6, .is_mobile = 1, .num_pipes = 2, 273 .gen = 6, .is_mobile = 1, .num_pipes = 2,
265 .need_gfx_hws = 1, .has_hotplug = 1, 274 .need_gfx_hws = 1, .has_hotplug = 1,
266 .has_fbc = 1, 275 .has_fbc = 1,
267 .has_bsd_ring = 1, 276 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
268 .has_blt_ring = 1,
269 .has_llc = 1, 277 .has_llc = 1,
270 .has_force_wake = 1,
271}; 278};
272 279
273#define GEN7_FEATURES \ 280#define GEN7_FEATURES \
274 .gen = 7, .num_pipes = 3, \ 281 .gen = 7, .num_pipes = 3, \
275 .need_gfx_hws = 1, .has_hotplug = 1, \ 282 .need_gfx_hws = 1, .has_hotplug = 1, \
276 .has_bsd_ring = 1, \ 283 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
277 .has_blt_ring = 1, \ 284 .has_llc = 1
278 .has_llc = 1, \
279 .has_force_wake = 1
280 285
281static const struct intel_device_info intel_ivybridge_d_info = { 286static const struct intel_device_info intel_ivybridge_d_info = {
282 GEN7_FEATURES, 287 GEN7_FEATURES,
@@ -318,7 +323,7 @@ static const struct intel_device_info intel_haswell_d_info = {
318 .is_haswell = 1, 323 .is_haswell = 1,
319 .has_ddi = 1, 324 .has_ddi = 1,
320 .has_fpga_dbg = 1, 325 .has_fpga_dbg = 1,
321 .has_vebox_ring = 1, 326 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
322}; 327};
323 328
324static const struct intel_device_info intel_haswell_m_info = { 329static const struct intel_device_info intel_haswell_m_info = {
@@ -328,7 +333,25 @@ static const struct intel_device_info intel_haswell_m_info = {
328 .has_ddi = 1, 333 .has_ddi = 1,
329 .has_fpga_dbg = 1, 334 .has_fpga_dbg = 1,
330 .has_fbc = 1, 335 .has_fbc = 1,
331 .has_vebox_ring = 1, 336 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
337};
338
339static const struct intel_device_info intel_broadwell_d_info = {
340 .is_preliminary = 1,
341 .gen = 8, .num_pipes = 3,
342 .need_gfx_hws = 1, .has_hotplug = 1,
343 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
344 .has_llc = 1,
345 .has_ddi = 1,
346};
347
348static const struct intel_device_info intel_broadwell_m_info = {
349 .is_preliminary = 1,
350 .gen = 8, .is_mobile = 1, .num_pipes = 3,
351 .need_gfx_hws = 1, .has_hotplug = 1,
352 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
353 .has_llc = 1,
354 .has_ddi = 1,
332}; 355};
333 356
334/* 357/*
@@ -362,7 +385,9 @@ static const struct intel_device_info intel_haswell_m_info = {
362 INTEL_HSW_D_IDS(&intel_haswell_d_info), \ 385 INTEL_HSW_D_IDS(&intel_haswell_d_info), \
363 INTEL_HSW_M_IDS(&intel_haswell_m_info), \ 386 INTEL_HSW_M_IDS(&intel_haswell_m_info), \
364 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \ 387 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
365 INTEL_VLV_D_IDS(&intel_valleyview_d_info) 388 INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
389 INTEL_BDW_M_IDS(&intel_broadwell_m_info), \
390 INTEL_BDW_D_IDS(&intel_broadwell_d_info)
366 391
367static const struct pci_device_id pciidlist[] = { /* aka */ 392static const struct pci_device_id pciidlist[] = { /* aka */
368 INTEL_PCI_IDS, 393 INTEL_PCI_IDS,
@@ -416,13 +441,19 @@ void intel_detect_pch(struct drm_device *dev)
416 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 441 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
417 /* PantherPoint is CPT compatible */ 442 /* PantherPoint is CPT compatible */
418 dev_priv->pch_type = PCH_CPT; 443 dev_priv->pch_type = PCH_CPT;
419 DRM_DEBUG_KMS("Found PatherPoint PCH\n"); 444 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
420 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 445 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
421 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 446 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
422 dev_priv->pch_type = PCH_LPT; 447 dev_priv->pch_type = PCH_LPT;
423 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 448 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
424 WARN_ON(!IS_HASWELL(dev)); 449 WARN_ON(!IS_HASWELL(dev));
425 WARN_ON(IS_ULT(dev)); 450 WARN_ON(IS_ULT(dev));
451 } else if (IS_BROADWELL(dev)) {
452 dev_priv->pch_type = PCH_LPT;
453 dev_priv->pch_id =
454 INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
455 DRM_DEBUG_KMS("This is Broadwell, assuming "
456 "LynxPoint LP PCH\n");
426 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 457 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
427 dev_priv->pch_type = PCH_LPT; 458 dev_priv->pch_type = PCH_LPT;
428 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 459 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
@@ -447,6 +478,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
447 if (INTEL_INFO(dev)->gen < 6) 478 if (INTEL_INFO(dev)->gen < 6)
448 return 0; 479 return 0;
449 480
481 /* Until we get further testing... */
482 if (IS_GEN8(dev)) {
483 WARN_ON(!i915_preliminary_hw_support);
484 return 0;
485 }
486
450 if (i915_semaphores >= 0) 487 if (i915_semaphores >= 0)
451 return i915_semaphores; 488 return i915_semaphores;
452 489
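
i915_semaphores follows the kernel's common tri-state module-parameter idiom: negative means auto-detect, zero and positive force the feature off or on. Note that the new gen-8 early return above sits before the user-override check, so it wins even over an explicit request. A small userspace model of the idiom (parameter name and auto policy are illustrative):

    #include <stdio.h>

    static int semaphores_param = -1; /* modparam default: auto-detect */

    static int semaphores_enabled(int gen)
    {
        if (gen < 6)
            return 0;                /* hardware can't do it at all */
        if (semaphores_param >= 0)
            return semaphores_param; /* explicit user choice */
        return 1;                    /* auto policy: default on */
    }

    int main(void)
    {
        printf("auto: %d\n", semaphores_enabled(7));       /* 1 */
        semaphores_param = 0;
        printf("forced off: %d\n", semaphores_enabled(7)); /* 0 */
        return 0;
    }
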
@@ -472,7 +509,7 @@ static int i915_drm_freeze(struct drm_device *dev)
472 /* We do a lot of poking in a lot of registers, make sure they work 509 /* We do a lot of poking in a lot of registers, make sure they work
473 * properly. */ 510 * properly. */
474 hsw_disable_package_c8(dev_priv); 511 hsw_disable_package_c8(dev_priv);
475 intel_set_power_well(dev, true); 512 intel_display_set_init_power(dev, true);
476 513
477 drm_kms_helper_poll_disable(dev); 514 drm_kms_helper_poll_disable(dev);
478 515
@@ -482,9 +519,7 @@ static int i915_drm_freeze(struct drm_device *dev)
482 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 519 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
483 int error; 520 int error;
484 521
485 mutex_lock(&dev->struct_mutex); 522 error = i915_gem_suspend(dev);
486 error = i915_gem_idle(dev);
487 mutex_unlock(&dev->struct_mutex);
488 if (error) { 523 if (error) {
489 dev_err(&dev->pdev->dev, 524 dev_err(&dev->pdev->dev,
490 "GEM idle failed, resume might fail\n"); 525 "GEM idle failed, resume might fail\n");
@@ -578,11 +613,24 @@ static void intel_resume_hotplug(struct drm_device *dev)
578 drm_helper_hpd_irq_event(dev); 613 drm_helper_hpd_irq_event(dev);
579} 614}
580 615
581static int __i915_drm_thaw(struct drm_device *dev) 616static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
582{ 617{
583 struct drm_i915_private *dev_priv = dev->dev_private; 618 struct drm_i915_private *dev_priv = dev->dev_private;
584 int error = 0; 619 int error = 0;
585 620
621 intel_uncore_early_sanitize(dev);
622
623 intel_uncore_sanitize(dev);
624
625 if (drm_core_check_feature(dev, DRIVER_MODESET) &&
626 restore_gtt_mappings) {
627 mutex_lock(&dev->struct_mutex);
628 i915_gem_restore_gtt_mappings(dev);
629 mutex_unlock(&dev->struct_mutex);
630 }
631
632 intel_power_domains_init_hw(dev);
633
586 i915_restore_state(dev); 634 i915_restore_state(dev);
587 intel_opregion_setup(dev); 635 intel_opregion_setup(dev);
588 636
@@ -642,20 +690,10 @@ static int __i915_drm_thaw(struct drm_device *dev)
642 690
643static int i915_drm_thaw(struct drm_device *dev) 691static int i915_drm_thaw(struct drm_device *dev)
644{ 692{
645 int error = 0; 693 if (drm_core_check_feature(dev, DRIVER_MODESET))
646
647 intel_uncore_sanitize(dev);
648
649 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
650 mutex_lock(&dev->struct_mutex);
651 i915_gem_restore_gtt_mappings(dev);
652 mutex_unlock(&dev->struct_mutex);
653 } else if (drm_core_check_feature(dev, DRIVER_MODESET))
654 i915_check_and_clear_faults(dev); 694 i915_check_and_clear_faults(dev);
655 695
656 __i915_drm_thaw(dev); 696 return __i915_drm_thaw(dev, true);
657
658 return error;
659} 697}
660 698
661int i915_resume(struct drm_device *dev) 699int i915_resume(struct drm_device *dev)
@@ -671,20 +709,12 @@ int i915_resume(struct drm_device *dev)
671 709
672 pci_set_master(dev->pdev); 710 pci_set_master(dev->pdev);
673 711
674 intel_uncore_sanitize(dev);
675
676 /* 712 /*
677 * Platforms with opregion should have sane BIOS, older ones (gen3 and 713 * Platforms with opregion should have sane BIOS, older ones (gen3 and
678 * earlier) need this since the BIOS might clear all our scratch PTEs. 714 * earlier) need to restore the GTT mappings since the BIOS might clear
715 * all our scratch PTEs.
679 */ 716 */
680 if (drm_core_check_feature(dev, DRIVER_MODESET) && 717 ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
681 !dev_priv->opregion.header) {
682 mutex_lock(&dev->struct_mutex);
683 i915_gem_restore_gtt_mappings(dev);
684 mutex_unlock(&dev->struct_mutex);
685 }
686
687 ret = __i915_drm_thaw(dev);
688 if (ret) 718 if (ret)
689 return ret; 719 return ret;
690 720
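
The boolean passed to __i915_drm_thaw() encodes the heuristic spelled out in the rewritten comment: platforms that expose an ACPI OpRegion are assumed to have a BIOS that preserves the scratch PTEs, so only opregion-less (older, gen3-and-earlier) systems pay for a GTT restore on resume. A sketch of the decision, with opregion_header standing in for dev_priv->opregion.header:

    #include <stdbool.h>
    #include <stdio.h>

    static int thaw_common(bool restore_gtt)
    {
        if (restore_gtt)
            puts("restoring GTT mappings");
        return 0;
    }

    int main(void)
    {
        void *opregion_header = NULL; /* e.g. an old BIOS with no OpRegion */
        return thaw_common(opregion_header == NULL);
    }
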
@@ -722,24 +752,19 @@ int i915_reset(struct drm_device *dev)
722 752
723 simulated = dev_priv->gpu_error.stop_rings != 0; 753 simulated = dev_priv->gpu_error.stop_rings != 0;
724 754
725 if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) { 755 ret = intel_gpu_reset(dev);
726 DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); 756
727 ret = -ENODEV; 757 /* Also reset the gpu hangman. */
728 } else { 758 if (simulated) {
729 ret = intel_gpu_reset(dev); 759 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
730 760 dev_priv->gpu_error.stop_rings = 0;
731 /* Also reset the gpu hangman. */ 761 if (ret == -ENODEV) {
732 if (simulated) { 762 DRM_ERROR("Reset not implemented, but ignoring "
733 DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); 763 "error for simulated gpu hangs\n");
734 dev_priv->gpu_error.stop_rings = 0; 764 ret = 0;
735 if (ret == -ENODEV) { 765 }
736 DRM_ERROR("Reset not implemented, but ignoring "
737 "error for simulated gpu hangs\n");
738 ret = 0;
739 }
740 } else
741 dev_priv->gpu_error.last_reset = get_seconds();
742 } 766 }
767
743 if (ret) { 768 if (ret) {
744 DRM_ERROR("Failed to reset chip.\n"); 769 DRM_ERROR("Failed to reset chip.\n");
745 mutex_unlock(&dev->struct_mutex); 770 mutex_unlock(&dev->struct_mutex);
@@ -762,30 +787,17 @@ int i915_reset(struct drm_device *dev)
762 */ 787 */
763 if (drm_core_check_feature(dev, DRIVER_MODESET) || 788 if (drm_core_check_feature(dev, DRIVER_MODESET) ||
764 !dev_priv->ums.mm_suspended) { 789 !dev_priv->ums.mm_suspended) {
765 struct intel_ring_buffer *ring; 790 bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
766 int i;
767
768 dev_priv->ums.mm_suspended = 0; 791 dev_priv->ums.mm_suspended = 0;
769 792
770 i915_gem_init_swizzling(dev); 793 ret = i915_gem_init_hw(dev);
771 794 if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
772 for_each_ring(ring, dev_priv, i) 795 DRM_ERROR("HW contexts didn't survive reset\n");
773 ring->init(ring);
774
775 i915_gem_context_init(dev);
776 if (dev_priv->mm.aliasing_ppgtt) {
777 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
778 if (ret)
779 i915_gem_cleanup_aliasing_ppgtt(dev);
780 }
781
782 /*
783 * It would make sense to re-init all the other hw state, at
784 * least the rps/rc6/emon init done within modeset_init_hw. For
785 * some unknown reason, this blows up my ilk, so don't.
786 */
787
788 mutex_unlock(&dev->struct_mutex); 796 mutex_unlock(&dev->struct_mutex);
797 if (ret) {
798 DRM_ERROR("Failed hw init on reset %d\n", ret);
799 return ret;
800 }
789 801
790 drm_irq_uninstall(dev); 802 drm_irq_uninstall(dev);
791 drm_irq_install(dev); 803 drm_irq_install(dev);
@@ -802,6 +814,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
802 struct intel_device_info *intel_info = 814 struct intel_device_info *intel_info =
803 (struct intel_device_info *) ent->driver_data; 815 (struct intel_device_info *) ent->driver_data;
804 816
817 if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
818 DRM_INFO("This hardware requires preliminary hardware support.\n"
819 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
820 return -ENODEV;
821 }
822
805 /* Only bind to function 0 of the device. Early generations 823 /* Only bind to function 0 of the device. Early generations
806 * used function 1 as a placeholder for multi-head. This causes 824 * used function 1 as a placeholder for multi-head. This causes
807 * us confusion instead, especially on the systems where both 825 * us confusion instead, especially on the systems where both
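
The probe-time gate refuses to bind to hardware flagged is_preliminary (the new Broadwell entries above) unless the user opted in, either via CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT or the modparam. A minimal userspace model of the gate (struct and variable names are illustrative):

    #include <errno.h>
    #include <stdio.h>

    struct device_info { int is_preliminary; };

    static int preliminary_hw_support; /* modparam; Kconfig sets the default */

    static int probe(const struct device_info *info)
    {
        if (info->is_preliminary && !preliminary_hw_support) {
            fprintf(stderr, "preliminary hardware, not binding\n");
            return -ENODEV;
        }
        return 0; /* continue with the normal probe */
    }

    int main(void)
    {
        struct device_info bdw = { .is_preliminary = 1 };
        return probe(&bdw) ? 1 : 0;
    }
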
@@ -949,7 +967,6 @@ static struct drm_driver driver = {
949 .debugfs_init = i915_debugfs_init, 967 .debugfs_init = i915_debugfs_init,
950 .debugfs_cleanup = i915_debugfs_cleanup, 968 .debugfs_cleanup = i915_debugfs_cleanup,
951#endif 969#endif
952 .gem_init_object = i915_gem_init_object,
953 .gem_free_object = i915_gem_free_object, 970 .gem_free_object = i915_gem_free_object,
954 .gem_vm_ops = &i915_gem_vm_ops, 971 .gem_vm_ops = &i915_gem_vm_ops,
955 972
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ab0f2c0a440c..8600c315b4c4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -54,6 +54,7 @@
54#define DRIVER_DATE "20080730" 54#define DRIVER_DATE "20080730"
55 55
56enum pipe { 56enum pipe {
57 INVALID_PIPE = -1,
57 PIPE_A = 0, 58 PIPE_A = 0,
58 PIPE_B, 59 PIPE_B,
59 PIPE_C, 60 PIPE_C,
@@ -98,13 +99,29 @@ enum intel_display_power_domain {
98 POWER_DOMAIN_TRANSCODER_A, 99 POWER_DOMAIN_TRANSCODER_A,
99 POWER_DOMAIN_TRANSCODER_B, 100 POWER_DOMAIN_TRANSCODER_B,
100 POWER_DOMAIN_TRANSCODER_C, 101 POWER_DOMAIN_TRANSCODER_C,
101 POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF, 102 POWER_DOMAIN_TRANSCODER_EDP,
103 POWER_DOMAIN_VGA,
104 POWER_DOMAIN_INIT,
105
106 POWER_DOMAIN_NUM,
102}; 107};
103 108
109#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
110
104#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) 111#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
105#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ 112#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
106 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) 113 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
107#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A) 114#define POWER_DOMAIN_TRANSCODER(tran) \
115 ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
116 (tran) + POWER_DOMAIN_TRANSCODER_A)
117
118#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
119 BIT(POWER_DOMAIN_PIPE_A) | \
120 BIT(POWER_DOMAIN_TRANSCODER_EDP))
121#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
122 BIT(POWER_DOMAIN_PIPE_A) | \
123 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
124 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
108 125
109enum hpd_pin { 126enum hpd_pin {
110 HPD_NONE = 0, 127 HPD_NONE = 0,
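
Making the enum dense and terminating it with POWER_DOMAIN_NUM is what allows domain sets to be plain bitmasks: POWER_DOMAIN_MASK is all-ones over NUM bits, and the always-on sets above are ORed BIT()s. The eDP special case in POWER_DOMAIN_TRANSCODER() exists because the transcoder enum's eDP value doesn't follow A-C contiguously (the removed + 0xF offset was compensating for the same thing). A compilable sketch of the set arithmetic, with illustrative domain values:

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    enum domain { DOM_PIPE_A, DOM_TRANSCODER_EDP, DOM_VGA, DOM_NUM };

    #define DOMAIN_MASK (BIT(DOM_NUM) - 1)  /* all domains */
    #define ALWAYS_ON   (BIT(DOM_PIPE_A) | BIT(DOM_TRANSCODER_EDP))

    int main(void)
    {
        printf("mask=%#x, VGA always on? %d\n",
               DOMAIN_MASK, !!(ALWAYS_ON & BIT(DOM_VGA))); /* 0x7, 0 */
        return 0;
    }
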
@@ -225,9 +242,12 @@ struct intel_opregion {
225 struct opregion_header __iomem *header; 242 struct opregion_header __iomem *header;
226 struct opregion_acpi __iomem *acpi; 243 struct opregion_acpi __iomem *acpi;
227 struct opregion_swsci __iomem *swsci; 244 struct opregion_swsci __iomem *swsci;
245 u32 swsci_gbda_sub_functions;
246 u32 swsci_sbcb_sub_functions;
228 struct opregion_asle __iomem *asle; 247 struct opregion_asle __iomem *asle;
229 void __iomem *vbt; 248 void __iomem *vbt;
230 u32 __iomem *lid_state; 249 u32 __iomem *lid_state;
250 struct work_struct asle_work;
231}; 251};
232#define OPREGION_SIZE (8*1024) 252#define OPREGION_SIZE (8*1024)
233 253
@@ -285,6 +305,7 @@ struct drm_i915_error_state {
285 u32 cpu_ring_tail[I915_NUM_RINGS]; 305 u32 cpu_ring_tail[I915_NUM_RINGS];
286 u32 error; /* gen6+ */ 306 u32 error; /* gen6+ */
287 u32 err_int; /* gen7 */ 307 u32 err_int; /* gen7 */
308 u32 bbstate[I915_NUM_RINGS];
288 u32 instpm[I915_NUM_RINGS]; 309 u32 instpm[I915_NUM_RINGS];
289 u32 instps[I915_NUM_RINGS]; 310 u32 instps[I915_NUM_RINGS];
290 u32 extra_instdone[I915_NUM_INSTDONE_REG]; 311 u32 extra_instdone[I915_NUM_INSTDONE_REG];
@@ -321,11 +342,13 @@ struct drm_i915_error_state {
321 u32 dirty:1; 342 u32 dirty:1;
322 u32 purgeable:1; 343 u32 purgeable:1;
323 s32 ring:4; 344 s32 ring:4;
324 u32 cache_level:2; 345 u32 cache_level:3;
325 } **active_bo, **pinned_bo; 346 } **active_bo, **pinned_bo;
326 u32 *active_bo_count, *pinned_bo_count; 347 u32 *active_bo_count, *pinned_bo_count;
327 struct intel_overlay_error_state *overlay; 348 struct intel_overlay_error_state *overlay;
328 struct intel_display_error_state *display; 349 struct intel_display_error_state *display;
350 int hangcheck_score[I915_NUM_RINGS];
351 enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
329}; 352};
330 353
331struct intel_crtc_config; 354struct intel_crtc_config;
@@ -357,7 +380,7 @@ struct drm_i915_display_funcs {
357 int target, int refclk, 380 int target, int refclk,
358 struct dpll *match_clock, 381 struct dpll *match_clock,
359 struct dpll *best_clock); 382 struct dpll *best_clock);
360 void (*update_wm)(struct drm_device *dev); 383 void (*update_wm)(struct drm_crtc *crtc);
361 void (*update_sprite_wm)(struct drm_plane *plane, 384 void (*update_sprite_wm)(struct drm_plane *plane,
362 struct drm_crtc *crtc, 385 struct drm_crtc *crtc,
363 uint32_t sprite_width, int pixel_size, 386 uint32_t sprite_width, int pixel_size,
@@ -367,7 +390,6 @@ struct drm_i915_display_funcs {
367 * fills out the pipe-config with the hw state. */ 390 * fills out the pipe-config with the hw state. */
368 bool (*get_pipe_config)(struct intel_crtc *, 391 bool (*get_pipe_config)(struct intel_crtc *,
369 struct intel_crtc_config *); 392 struct intel_crtc_config *);
370 void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
371 int (*crtc_mode_set)(struct drm_crtc *crtc, 393 int (*crtc_mode_set)(struct drm_crtc *crtc,
372 int x, int y, 394 int x, int y,
373 struct drm_framebuffer *old_fb); 395 struct drm_framebuffer *old_fb);
@@ -375,7 +397,8 @@ struct drm_i915_display_funcs {
375 void (*crtc_disable)(struct drm_crtc *crtc); 397 void (*crtc_disable)(struct drm_crtc *crtc);
376 void (*off)(struct drm_crtc *crtc); 398 void (*off)(struct drm_crtc *crtc);
377 void (*write_eld)(struct drm_connector *connector, 399 void (*write_eld)(struct drm_connector *connector,
378 struct drm_crtc *crtc); 400 struct drm_crtc *crtc,
401 struct drm_display_mode *mode);
379 void (*fdi_link_train)(struct drm_crtc *crtc); 402 void (*fdi_link_train)(struct drm_crtc *crtc);
380 void (*init_clock_gating)(struct drm_device *dev); 403 void (*init_clock_gating)(struct drm_device *dev);
381 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 404 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
@@ -395,6 +418,20 @@ struct drm_i915_display_funcs {
395struct intel_uncore_funcs { 418struct intel_uncore_funcs {
396 void (*force_wake_get)(struct drm_i915_private *dev_priv); 419 void (*force_wake_get)(struct drm_i915_private *dev_priv);
397 void (*force_wake_put)(struct drm_i915_private *dev_priv); 420 void (*force_wake_put)(struct drm_i915_private *dev_priv);
421
422 uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
423 uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
424 uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
425 uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
426
427 void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
428 uint8_t val, bool trace);
429 void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
430 uint16_t val, bool trace);
431 void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
432 uint32_t val, bool trace);
433 void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
434 uint64_t val, bool trace);
398}; 435};
399 436
400struct intel_uncore { 437struct intel_uncore {
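
Routing every register access through per-size function pointers lets each platform install accessors with the right forcewake handling, instead of scattering HAS_FORCE_WAKE() checks (which this series deletes) around the I/O macros. A userspace model of the vtable shape, reduced to the 32-bit accessors and with invented behavior:

    #include <stdint.h>
    #include <stdio.h>

    struct uncore_funcs {
        uint32_t (*mmio_readl)(uint32_t offset);
        void (*mmio_writel)(uint32_t offset, uint32_t val);
    };

    /* A "plain" accessor pair; a gen6 variant would bracket the access
     * with forcewake get/put. */
    static uint32_t plain_readl(uint32_t offset) { return offset ^ 0xa5a5a5a5u; }
    static void plain_writel(uint32_t offset, uint32_t val)
    {
        printf("write %#x -> reg %#x\n", val, offset);
    }

    int main(void)
    {
        struct uncore_funcs funcs = {
            .mmio_readl  = plain_readl,
            .mmio_writel = plain_writel,
        };
        funcs.mmio_writel(0x2030, funcs.mmio_readl(0x2030));
        return 0;
    }
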
@@ -404,6 +441,8 @@ struct intel_uncore {
404 441
405 unsigned fifo_count; 442 unsigned fifo_count;
406 unsigned forcewake_count; 443 unsigned forcewake_count;
444
445 struct delayed_work force_wake_work;
407}; 446};
408 447
409#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 448#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -420,7 +459,7 @@ struct intel_uncore {
420 func(is_ivybridge) sep \ 459 func(is_ivybridge) sep \
421 func(is_valleyview) sep \ 460 func(is_valleyview) sep \
422 func(is_haswell) sep \ 461 func(is_haswell) sep \
423 func(has_force_wake) sep \ 462 func(is_preliminary) sep \
424 func(has_fbc) sep \ 463 func(has_fbc) sep \
425 func(has_pipe_cxsr) sep \ 464 func(has_pipe_cxsr) sep \
426 func(has_hotplug) sep \ 465 func(has_hotplug) sep \
@@ -428,9 +467,6 @@ struct intel_uncore {
428 func(has_overlay) sep \ 467 func(has_overlay) sep \
429 func(overlay_needs_physical) sep \ 468 func(overlay_needs_physical) sep \
430 func(supports_tv) sep \ 469 func(supports_tv) sep \
431 func(has_bsd_ring) sep \
432 func(has_blt_ring) sep \
433 func(has_vebox_ring) sep \
434 func(has_llc) sep \ 470 func(has_llc) sep \
435 func(has_ddi) sep \ 471 func(has_ddi) sep \
436 func(has_fpga_dbg) 472 func(has_fpga_dbg)
@@ -442,6 +478,7 @@ struct intel_device_info {
442 u32 display_mmio_offset; 478 u32 display_mmio_offset;
443 u8 num_pipes:3; 479 u8 num_pipes:3;
444 u8 gen; 480 u8 gen;
481 u8 ring_mask; /* Rings supported by the HW */
445 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); 482 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
446}; 483};
447 484
@@ -542,10 +579,21 @@ struct i915_gtt {
542struct i915_hw_ppgtt { 579struct i915_hw_ppgtt {
543 struct i915_address_space base; 580 struct i915_address_space base;
544 unsigned num_pd_entries; 581 unsigned num_pd_entries;
545 struct page **pt_pages; 582 union {
546 uint32_t pd_offset; 583 struct page **pt_pages;
547 dma_addr_t *pt_dma_addr; 584 struct page *gen8_pt_pages;
548 585 };
586 struct page *pd_pages;
587 int num_pd_pages;
588 int num_pt_pages;
589 union {
590 uint32_t pd_offset;
591 dma_addr_t pd_dma_addr[4];
592 };
593 union {
594 dma_addr_t *pt_dma_addr;
595 dma_addr_t *gen8_pt_dma_addr[4];
596 };
549 int (*enable)(struct drm_device *dev); 597 int (*enable)(struct drm_device *dev);
550}; 598};
551 599
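
The unions let one i915_hw_ppgtt carry either the gen6/7 layout (a single pd_offset and a flat pt_dma_addr array) or the gen8 layout (up to four page directories); which member is live is implied by the hardware generation rather than a stored tag. A sketch of the shape, with an explicit gen field playing the role of INTEL_INFO(dev)->gen:

    #include <stdint.h>
    #include <stdio.h>

    struct ppgtt {
        int gen; /* selects which union member is meaningful */
        union {
            uint32_t pd_offset;      /* gen6/7 */
            uint64_t pd_dma_addr[4]; /* gen8: up to four page directories */
        };
    };

    int main(void)
    {
        struct ppgtt p = { .gen = 8 };
        p.pd_dma_addr[0] = 0x1000;
        if (p.gen >= 8)
            printf("gen8 pd[0] = %#llx\n",
                   (unsigned long long)p.pd_dma_addr[0]);
        return 0;
    }
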
@@ -570,6 +618,13 @@ struct i915_vma {
570 /** This vma's place in the batchbuffer or on the eviction list */ 618 /** This vma's place in the batchbuffer or on the eviction list */
571 struct list_head exec_list; 619 struct list_head exec_list;
572 620
621 /**
622 * Used for performing relocations during execbuffer insertion.
623 */
624 struct hlist_node exec_node;
625 unsigned long exec_handle;
626 struct drm_i915_gem_exec_object2 *exec_entry;
627
573}; 628};
574 629
575struct i915_ctx_hang_stats { 630struct i915_ctx_hang_stats {
@@ -578,6 +633,12 @@ struct i915_ctx_hang_stats {
578 633
579 /* This context had batch active when hang was declared */ 634 /* This context had batch active when hang was declared */
580 unsigned batch_active; 635 unsigned batch_active;
636
637 /* Time when this context was last blamed for a GPU reset */
638 unsigned long guilty_ts;
639
640 /* This context is banned to submit more work */
641 bool banned;
581}; 642};
582 643
583/* This must match up with the value previously used for execbuf2.rsvd1. */ 644/* This must match up with the value previously used for execbuf2.rsvd1. */
@@ -586,10 +647,13 @@ struct i915_hw_context {
586 struct kref ref; 647 struct kref ref;
587 int id; 648 int id;
588 bool is_initialized; 649 bool is_initialized;
650 uint8_t remap_slice;
589 struct drm_i915_file_private *file_priv; 651 struct drm_i915_file_private *file_priv;
590 struct intel_ring_buffer *ring; 652 struct intel_ring_buffer *ring;
591 struct drm_i915_gem_object *obj; 653 struct drm_i915_gem_object *obj;
592 struct i915_ctx_hang_stats hang_stats; 654 struct i915_ctx_hang_stats hang_stats;
655
656 struct list_head link;
593}; 657};
594 658
595struct i915_fbc { 659struct i915_fbc {
@@ -623,17 +687,9 @@ struct i915_fbc {
623 } no_fbc_reason; 687 } no_fbc_reason;
624}; 688};
625 689
626enum no_psr_reason { 690struct i915_psr {
627 PSR_NO_SOURCE, /* Not supported on platform */ 691 bool sink_support;
628 PSR_NO_SINK, /* Not supported by panel */ 692 bool source_ok;
629 PSR_MODULE_PARAM,
630 PSR_CRTC_NOT_ACTIVE,
631 PSR_PWR_WELL_ENABLED,
632 PSR_NOT_TILED,
633 PSR_SPRITE_ENABLED,
634 PSR_S3D_ENABLED,
635 PSR_INTERLACED_ENABLED,
636 PSR_HSW_NOT_DDIA,
637}; 693};
638 694
639enum intel_pch { 695enum intel_pch {
@@ -704,6 +760,9 @@ struct i915_suspend_saved_registers {
704 u32 saveBLC_HIST_CTL; 760 u32 saveBLC_HIST_CTL;
705 u32 saveBLC_PWM_CTL; 761 u32 saveBLC_PWM_CTL;
706 u32 saveBLC_PWM_CTL2; 762 u32 saveBLC_PWM_CTL2;
763 u32 saveBLC_HIST_CTL_B;
764 u32 saveBLC_PWM_CTL_B;
765 u32 saveBLC_PWM_CTL2_B;
707 u32 saveBLC_CPU_PWM_CTL; 766 u32 saveBLC_CPU_PWM_CTL;
708 u32 saveBLC_CPU_PWM_CTL2; 767 u32 saveBLC_CPU_PWM_CTL2;
709 u32 saveFPB0; 768 u32 saveFPB0;
@@ -823,17 +882,20 @@ struct intel_gen6_power_mgmt {
823 struct work_struct work; 882 struct work_struct work;
824 u32 pm_iir; 883 u32 pm_iir;
825 884
826 /* On vlv we need to manually drop to Vmin with a delayed work. */
827 struct delayed_work vlv_work;
828
829 /* The below variables an all the rps hw state are protected by 885 /* The below variables an all the rps hw state are protected by
830 * dev->struct mutext. */ 886 * dev->struct mutext. */
831 u8 cur_delay; 887 u8 cur_delay;
832 u8 min_delay; 888 u8 min_delay;
833 u8 max_delay; 889 u8 max_delay;
834 u8 rpe_delay; 890 u8 rpe_delay;
891 u8 rp1_delay;
892 u8 rp0_delay;
835 u8 hw_max; 893 u8 hw_max;
836 894
895 int last_adj;
896 enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
897
898 bool enabled;
837 struct delayed_work delayed_resume_work; 899 struct delayed_work delayed_resume_work;
838 900
839 /* 901 /*
@@ -870,11 +932,21 @@ struct intel_ilk_power_mgmt {
870 932
871/* Power well structure for haswell */ 933/* Power well structure for haswell */
872struct i915_power_well { 934struct i915_power_well {
873 struct drm_device *device;
874 spinlock_t lock;
875 /* power well enable/disable usage count */ 935 /* power well enable/disable usage count */
876 int count; 936 int count;
877 int i915_request; 937};
938
939#define I915_MAX_POWER_WELLS 1
940
941struct i915_power_domains {
942 /*
943 * Power wells needed for initialization at driver init and suspend
944 * time are on. They are kept on until after the first modeset.
945 */
946 bool init_power_on;
947
948 struct mutex lock;
949 struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
878}; 950};
879 951
880struct i915_dri1_state { 952struct i915_dri1_state {
@@ -902,9 +974,11 @@ struct i915_ums_state {
902 int mm_suspended; 974 int mm_suspended;
903}; 975};
904 976
977#define MAX_L3_SLICES 2
905struct intel_l3_parity { 978struct intel_l3_parity {
906 u32 *remap_info; 979 u32 *remap_info[MAX_L3_SLICES];
907 struct work_struct error_work; 980 struct work_struct error_work;
981 int which_slice;
908}; 982};
909 983
910struct i915_gem_mm { 984struct i915_gem_mm {
@@ -942,6 +1016,15 @@ struct i915_gem_mm {
942 struct delayed_work retire_work; 1016 struct delayed_work retire_work;
943 1017
944 /** 1018 /**
1019 * When we detect an idle GPU, we want to turn on
1020 * powersaving features. So once we see that there
1021 * are no more requests outstanding and no more
1022 * arrive within a small period of time, we fire
1023 * off the idle_work.
1024 */
1025 struct delayed_work idle_work;
1026
1027 /**
945 * Are we in a non-interruptible section of code like 1028 * Are we in a non-interruptible section of code like
946 * modesetting? 1029 * modesetting?
947 */ 1030 */
@@ -979,6 +1062,9 @@ struct i915_gpu_error {
979 /* For hangcheck timer */ 1062 /* For hangcheck timer */
980#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 1063#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
981#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) 1064#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
1065 /* Hang gpu twice in this window and your context gets banned */
1066#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
1067
982 struct timer_list hangcheck_timer; 1068 struct timer_list hangcheck_timer;
983 1069
984 /* For reset and error_state handling. */ 1070 /* For reset and error_state handling. */
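
The ban window works out to 12 seconds: with DRM_I915_HANGCHECK_PERIOD at 1500 ms, DIV_ROUND_UP(8 * 1500, 1000) = DIV_ROUND_UP(12000, 1000) = 12. Together with the guilty_ts and banned fields added to i915_ctx_hang_stats earlier in this diff, a context blamed for two hangs inside that window stops being allowed to submit. The arithmetic, checked:

    #include <assert.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        assert(DIV_ROUND_UP(8 * 1500, 1000) == 12); /* seconds */
        return 0;
    }
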
@@ -987,7 +1073,8 @@ struct i915_gpu_error {
987 struct drm_i915_error_state *first_error; 1073 struct drm_i915_error_state *first_error;
988 struct work_struct work; 1074 struct work_struct work;
989 1075
990 unsigned long last_reset; 1076
1077 unsigned long missed_irq_rings;
991 1078
992 /** 1079 /**
993 * State variable and reset counter controlling the reset flow 1080 * State variable and reset counter controlling the reset flow
@@ -1027,6 +1114,9 @@ struct i915_gpu_error {
1027 1114
1028 /* For gpu hang simulation. */ 1115 /* For gpu hang simulation. */
1029 unsigned int stop_rings; 1116 unsigned int stop_rings;
1117
1118 /* For missed irq/seqno simulation. */
1119 unsigned int test_irq_rings;
1030}; 1120};
1031 1121
1032enum modeset_restore { 1122enum modeset_restore {
@@ -1035,6 +1125,14 @@ enum modeset_restore {
1035 MODESET_SUSPENDED, 1125 MODESET_SUSPENDED,
1036}; 1126};
1037 1127
1128struct ddi_vbt_port_info {
1129 uint8_t hdmi_level_shift;
1130
1131 uint8_t supports_dvi:1;
1132 uint8_t supports_hdmi:1;
1133 uint8_t supports_dp:1;
1134};
1135
1038struct intel_vbt_data { 1136struct intel_vbt_data {
1039 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 1137 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1040 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 1138 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1060,10 +1158,17 @@ struct intel_vbt_data {
1060 int edp_bpp; 1158 int edp_bpp;
1061 struct edp_power_seq edp_pps; 1159 struct edp_power_seq edp_pps;
1062 1160
1161 /* MIPI DSI */
1162 struct {
1163 u16 panel_id;
1164 } dsi;
1165
1063 int crt_ddc_pin; 1166 int crt_ddc_pin;
1064 1167
1065 int child_dev_num; 1168 int child_dev_num;
1066 struct child_device_config *child_dev; 1169 union child_device_config *child_dev;
1170
1171 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
1067}; 1172};
1068 1173
1069enum intel_ddb_partitioning { 1174enum intel_ddb_partitioning {
@@ -1079,6 +1184,15 @@ struct intel_wm_level {
1079 uint32_t fbc_val; 1184 uint32_t fbc_val;
1080}; 1185};
1081 1186
1187struct hsw_wm_values {
1188 uint32_t wm_pipe[3];
1189 uint32_t wm_lp[3];
1190 uint32_t wm_lp_spr[3];
1191 uint32_t wm_linetime[3];
1192 bool enable_fbc_wm;
1193 enum intel_ddb_partitioning partitioning;
1194};
1195
1082/* 1196/*
1083 * This struct tracks the state needed for the Package C8+ feature. 1197 * This struct tracks the state needed for the Package C8+ feature.
1084 * 1198 *
@@ -1148,6 +1262,36 @@ struct i915_package_c8 {
1148 } regsave; 1262 } regsave;
1149}; 1263};
1150 1264
1265enum intel_pipe_crc_source {
1266 INTEL_PIPE_CRC_SOURCE_NONE,
1267 INTEL_PIPE_CRC_SOURCE_PLANE1,
1268 INTEL_PIPE_CRC_SOURCE_PLANE2,
1269 INTEL_PIPE_CRC_SOURCE_PF,
1270 INTEL_PIPE_CRC_SOURCE_PIPE,
1271 /* TV/DP on pre-gen5/vlv can't use the pipe source. */
1272 INTEL_PIPE_CRC_SOURCE_TV,
1273 INTEL_PIPE_CRC_SOURCE_DP_B,
1274 INTEL_PIPE_CRC_SOURCE_DP_C,
1275 INTEL_PIPE_CRC_SOURCE_DP_D,
1276 INTEL_PIPE_CRC_SOURCE_AUTO,
1277 INTEL_PIPE_CRC_SOURCE_MAX,
1278};
1279
1280struct intel_pipe_crc_entry {
1281 uint32_t frame;
1282 uint32_t crc[5];
1283};
1284
1285#define INTEL_PIPE_CRC_ENTRIES_NR 128
1286struct intel_pipe_crc {
1287 spinlock_t lock;
1288 bool opened; /* exclusive access to the result file */
1289 struct intel_pipe_crc_entry *entries;
1290 enum intel_pipe_crc_source source;
1291 int head, tail;
1292 wait_queue_head_t wq;
1293};
1294
1151typedef struct drm_i915_private { 1295typedef struct drm_i915_private {
1152 struct drm_device *dev; 1296 struct drm_device *dev;
1153 struct kmem_cache *slab; 1297 struct kmem_cache *slab;
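
The CRC entries land in a fixed 128-slot ring: the producer (interrupt side) advances head under the spinlock and wakes wq, while the debugfs reader consumes from tail or sleeps when the ring is empty. A single-threaded model of the index arithmetic, with locking and the wait queue deliberately omitted:

    #include <stdio.h>

    #define NR_ENTRIES 128 /* INTEL_PIPE_CRC_ENTRIES_NR */

    struct entry { unsigned frame; };

    static struct entry ring[NR_ENTRIES];
    static int head, tail; /* producer and consumer indices */

    static void push(unsigned frame)
    {
        ring[head].frame = frame;
        head = (head + 1) % NR_ENTRIES; /* irq side would wake readers here */
    }

    static int pop(struct entry *out)
    {
        if (tail == head)
            return 0; /* empty; the real reader sleeps on the wait queue */
        *out = ring[tail];
        tail = (tail + 1) % NR_ENTRIES;
        return 1;
    }

    int main(void)
    {
        struct entry e;
        push(42);
        if (pop(&e))
            printf("frame %u\n", e.frame);
        return 0;
    }
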
@@ -1193,7 +1337,10 @@ typedef struct drm_i915_private {
1193 struct mutex dpio_lock; 1337 struct mutex dpio_lock;
1194 1338
1195 /** Cached value of IMR to avoid reads in updating the bitfield */ 1339 /** Cached value of IMR to avoid reads in updating the bitfield */
1196 u32 irq_mask; 1340 union {
1341 u32 irq_mask;
1342 u32 de_irq_mask[I915_MAX_PIPES];
1343 };
1197 u32 gt_irq_mask; 1344 u32 gt_irq_mask;
1198 u32 pm_irq_mask; 1345 u32 pm_irq_mask;
1199 1346
@@ -1272,6 +1419,10 @@ typedef struct drm_i915_private {
1272 struct drm_crtc *pipe_to_crtc_mapping[3]; 1419 struct drm_crtc *pipe_to_crtc_mapping[3];
1273 wait_queue_head_t pending_flip_queue; 1420 wait_queue_head_t pending_flip_queue;
1274 1421
1422#ifdef CONFIG_DEBUG_FS
1423 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
1424#endif
1425
1275 int num_shared_dpll; 1426 int num_shared_dpll;
1276 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 1427 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1277 struct intel_ddi_plls ddi_plls; 1428 struct intel_ddi_plls ddi_plls;
@@ -1297,17 +1448,18 @@ typedef struct drm_i915_private {
1297 * mchdev_lock in intel_pm.c */ 1448 * mchdev_lock in intel_pm.c */
1298 struct intel_ilk_power_mgmt ips; 1449 struct intel_ilk_power_mgmt ips;
1299 1450
1300 /* Haswell power well */ 1451 struct i915_power_domains power_domains;
1301 struct i915_power_well power_well;
1302 1452
1303 enum no_psr_reason no_psr_reason; 1453 struct i915_psr psr;
1304 1454
1305 struct i915_gpu_error gpu_error; 1455 struct i915_gpu_error gpu_error;
1306 1456
1307 struct drm_i915_gem_object *vlv_pctx; 1457 struct drm_i915_gem_object *vlv_pctx;
1308 1458
1459#ifdef CONFIG_DRM_I915_FBDEV
1309 /* list of fbdev register on this device */ 1460 /* list of fbdev register on this device */
1310 struct intel_fbdev *fbdev; 1461 struct intel_fbdev *fbdev;
1462#endif
1311 1463
1312 /* 1464 /*
1313 * The console may be contended at resume, but we don't 1465 * The console may be contended at resume, but we don't
@@ -1320,6 +1472,7 @@ typedef struct drm_i915_private {
1320 1472
1321 bool hw_contexts_disabled; 1473 bool hw_contexts_disabled;
1322 uint32_t hw_context_size; 1474 uint32_t hw_context_size;
1475 struct list_head context_list;
1323 1476
1324 u32 fdi_rx_config; 1477 u32 fdi_rx_config;
1325 1478
@@ -1337,6 +1490,9 @@ typedef struct drm_i915_private {
1337 uint16_t spr_latency[5]; 1490 uint16_t spr_latency[5];
1338 /* cursor */ 1491 /* cursor */
1339 uint16_t cur_latency[5]; 1492 uint16_t cur_latency[5];
1493
1494 /* current hardware state */
1495 struct hsw_wm_values hw;
1340 } wm; 1496 } wm;
1341 1497
1342 struct i915_package_c8 pc8; 1498 struct i915_package_c8 pc8;
@@ -1400,8 +1556,6 @@ struct drm_i915_gem_object {
1400 struct list_head ring_list; 1556 struct list_head ring_list;
1401 /** Used in execbuf to temporarily hold a ref */ 1557 /** Used in execbuf to temporarily hold a ref */
1402 struct list_head obj_exec_link; 1558 struct list_head obj_exec_link;
1403 /** This object's place in the batchbuffer or on the eviction list */
1404 struct list_head exec_list;
1405 1559
1406 /** 1560 /**
1407 * This is set if the object is on the active lists (has pending 1561 * This is set if the object is on the active lists (has pending
@@ -1487,13 +1641,6 @@ struct drm_i915_gem_object {
1487 void *dma_buf_vmapping; 1641 void *dma_buf_vmapping;
1488 int vmapping_count; 1642 int vmapping_count;
1489 1643
1490 /**
1491 * Used for performing relocations during execbuffer insertion.
1492 */
1493 struct hlist_node exec_node;
1494 unsigned long exec_handle;
1495 struct drm_i915_gem_exec_object2 *exec_entry;
1496
1497 struct intel_ring_buffer *ring; 1644 struct intel_ring_buffer *ring;
1498 1645
1499 /** Breadcrumb of last rendering to the buffer. */ 1646 /** Breadcrumb of last rendering to the buffer. */
@@ -1505,11 +1652,14 @@ struct drm_i915_gem_object {
1505 /** Current tiling stride for the object, if it's tiled. */ 1652 /** Current tiling stride for the object, if it's tiled. */
1506 uint32_t stride; 1653 uint32_t stride;
1507 1654
1655 /** References from framebuffers, locks out tiling changes. */
1656 unsigned long framebuffer_references;
1657
1508 /** Record of address bit 17 of each page at last unbind. */ 1658 /** Record of address bit 17 of each page at last unbind. */
1509 unsigned long *bit_17; 1659 unsigned long *bit_17;
1510 1660
1511 /** User space pin count and filp owning the pin */ 1661 /** User space pin count and filp owning the pin */
1512 uint32_t user_pin_count; 1662 unsigned long user_pin_count;
1513 struct drm_file *pin_filp; 1663 struct drm_file *pin_filp;
1514 1664
1515 /** for phy allocated objects */ 1665 /** for phy allocated objects */
@@ -1560,48 +1710,56 @@ struct drm_i915_gem_request {
1560}; 1710};
1561 1711
1562struct drm_i915_file_private { 1712struct drm_i915_file_private {
1713 struct drm_i915_private *dev_priv;
1714
1563 struct { 1715 struct {
1564 spinlock_t lock; 1716 spinlock_t lock;
1565 struct list_head request_list; 1717 struct list_head request_list;
1718 struct delayed_work idle_work;
1566 } mm; 1719 } mm;
1567 struct idr context_idr; 1720 struct idr context_idr;
1568 1721
1569 struct i915_ctx_hang_stats hang_stats; 1722 struct i915_ctx_hang_stats hang_stats;
1723 atomic_t rps_wait_boost;
1570}; 1724};
1571 1725
1572#define INTEL_INFO(dev) (to_i915(dev)->info) 1726#define INTEL_INFO(dev) (to_i915(dev)->info)
1573 1727
1574#define IS_I830(dev) ((dev)->pci_device == 0x3577) 1728#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
1575#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1729#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
1576#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 1730#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1577#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1731#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
1578#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 1732#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1579#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1733#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
1580#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1734#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
1581#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 1735#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1582#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 1736#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1583#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 1737#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
1584#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) 1738#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
1585#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 1739#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1586#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) 1740#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
1587#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) 1741#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
1588#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 1742#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1589#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 1743#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1590#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1744#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
1591#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 1745#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
1592#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ 1746#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
1593 (dev)->pci_device == 0x0152 || \ 1747 (dev)->pdev->device == 0x0152 || \
1594 (dev)->pci_device == 0x015a) 1748 (dev)->pdev->device == 0x015a)
1595#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \ 1749#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
1596 (dev)->pci_device == 0x0106 || \ 1750 (dev)->pdev->device == 0x0106 || \
1597 (dev)->pci_device == 0x010A) 1751 (dev)->pdev->device == 0x010A)
1598#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 1752#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
1599#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 1753#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
1754#define IS_BROADWELL(dev) (INTEL_INFO(dev)->gen == 8)
1600#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1755#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1601#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 1756#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
1602 ((dev)->pci_device & 0xFF00) == 0x0C00) 1757 ((dev)->pdev->device & 0xFF00) == 0x0C00)
1603#define IS_ULT(dev) (IS_HASWELL(dev) && \ 1758#define IS_ULT(dev) (IS_HASWELL(dev) && \
1604 ((dev)->pci_device & 0xFF00) == 0x0A00) 1759 ((dev)->pdev->device & 0xFF00) == 0x0A00)
1760#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
1761 ((dev)->pdev->device & 0x00F0) == 0x0020)
1762#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
1605 1763
1606/* 1764/*
1607 * The genX designation typically refers to the render engine, so render 1765 * The genX designation typically refers to the render engine, so render
@@ -1615,10 +1773,15 @@ struct drm_i915_file_private {
1615#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 1773#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
1616#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 1774#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
1617#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 1775#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
1618 1776#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
1619#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) 1777
1620#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) 1778#define RENDER_RING (1<<RCS)
1621#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring) 1779#define BSD_RING (1<<VCS)
1780#define BLT_RING (1<<BCS)
1781#define VEBOX_RING (1<<VECS)
1782#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
1783#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
1784#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
1622#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 1785#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
1623#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size) 1786#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
1624#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1787#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
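
This is the payoff of the device-table churn earlier in the diff: three per-engine feature booleans collapse into one bitmask indexed by the engine ids (RCS, VCS, BCS, VECS), so HAS_BSD() and friends become mask tests and a new engine only costs a bit. A compilable sketch of the encoding:

    #include <stdio.h>

    enum ring_id { RCS, VCS, BCS, VECS }; /* render, video, blit, video enhance */

    #define RENDER_RING (1 << RCS)
    #define BSD_RING    (1 << VCS)
    #define BLT_RING    (1 << BCS)
    #define VEBOX_RING  (1 << VECS)

    int main(void)
    {
        /* e.g. the Haswell entries above */
        unsigned ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING;

        printf("has BSD? %d, has vebox? %d\n",
               !!(ring_mask & BSD_RING), !!(ring_mask & VEBOX_RING));
        return 0;
    }
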
@@ -1640,7 +1803,6 @@ struct drm_i915_file_private {
1640#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) 1803#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
1641#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) 1804#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
1642#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) 1805#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
1643#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1644#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 1806#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
1645#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 1807#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1646 1808
@@ -1648,11 +1810,12 @@ struct drm_i915_file_private {
1648#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 1810#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1649#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1811#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1650 1812
1651#define HAS_IPS(dev) (IS_ULT(dev)) 1813#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
1652 1814
1653#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 1815#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
1654#define HAS_POWER_WELL(dev) (IS_HASWELL(dev)) 1816#define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
1655#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 1817#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
1818#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
1656 1819
1657#define INTEL_PCH_DEVICE_ID_MASK 0xff00 1820#define INTEL_PCH_DEVICE_ID_MASK 0xff00
1658#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 1821#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -1668,35 +1831,14 @@ struct drm_i915_file_private {
1668#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) 1831#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
1669#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) 1832#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
1670 1833
1671#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) 1834/* DPF == dynamic parity feature */
1672 1835#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1673#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 1836#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
1674 1837
1675#define GT_FREQUENCY_MULTIPLIER 50 1838#define GT_FREQUENCY_MULTIPLIER 50
1676 1839
1677#include "i915_trace.h" 1840#include "i915_trace.h"
1678 1841
1679/**
1680 * RC6 is a special power stage which allows the GPU to enter an very
1681 * low-voltage mode when idle, using down to 0V while at this stage. This
1682 * stage is entered automatically when the GPU is idle when RC6 support is
1683 * enabled, and as soon as new workload arises GPU wakes up automatically as well.
1684 *
1685 * There are different RC6 modes available in Intel GPU, which differentiate
1686 * among each other with the latency required to enter and leave RC6 and
1687 * voltage consumed by the GPU in different states.
1688 *
1689 * The combination of the following flags define which states GPU is allowed
1690 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
1691 * RC6pp is deepest RC6. Their support by hardware varies according to the
1692 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
1693 * which brings the most power savings; deeper states save more power, but
1694 * require higher latency to switch to and wake up.
1695 */
1696#define INTEL_RC6_ENABLE (1<<0)
1697#define INTEL_RC6p_ENABLE (1<<1)
1698#define INTEL_RC6pp_ENABLE (1<<2)
1699
1700extern const struct drm_ioctl_desc i915_ioctls[]; 1842extern const struct drm_ioctl_desc i915_ioctls[];
1701extern int i915_max_ioctl; 1843extern int i915_max_ioctl;
1702extern unsigned int i915_fbpercrtc __always_unused; 1844extern unsigned int i915_fbpercrtc __always_unused;
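
NUM_L3_SLICES() leans on C truth values being 0 or 1: GT3 Haswell reports 2 slices, any other DPF-capable part reports HAS_L3_DPF's truth value (1), and everything else 0, which lines up with MAX_L3_SLICES = 2 and the per-slice remap_info[] array earlier in the diff. Worked through:

    #include <assert.h>
    #include <stdbool.h>

    static int num_l3_slices(bool is_hsw_gt3, bool has_l3_dpf)
    {
        return is_hsw_gt3 ? 2 : has_l3_dpf; /* bool promotes to 0 or 1 */
    }

    int main(void)
    {
        assert(num_l3_slices(true,  true)  == 2); /* Haswell GT3 */
        assert(num_l3_slices(false, true)  == 1); /* Ivybridge, other Haswell */
        assert(num_l3_slices(false, false) == 0); /* no dynamic parity */
        return 0;
    }
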
@@ -1767,12 +1909,13 @@ extern void intel_uncore_early_sanitize(struct drm_device *dev);
 extern void intel_uncore_init(struct drm_device *dev);
 extern void intel_uncore_clear_errors(struct drm_device *dev);
 extern void intel_uncore_check_errors(struct drm_device *dev);
+extern void intel_uncore_fini(struct drm_device *dev);
 
 void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
 
 void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
 
 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -1824,14 +1967,11 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 void i915_gem_load(struct drm_device *dev);
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
-int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
@@ -1870,9 +2010,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-				    struct intel_ring_buffer *ring);
-
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring);
 int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
@@ -1913,7 +2052,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
	}
 }
 
-void i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
@@ -1933,11 +2072,11 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_l3_remap(struct drm_device *dev);
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
-int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_gem_suspend(struct drm_device *dev);
 int __i915_add_request(struct intel_ring_buffer *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *batch_obj,
@@ -1964,6 +2103,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 uint32_t
@@ -1995,6 +2135,9 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);
+
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+
 /* Some GGTT VM helpers */
 #define obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
@@ -2031,7 +2174,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
				   map_and_fenceable, nonblocking);
 }
-#undef obj_to_ggtt
 
 /* i915_gem_context.c */
 void i915_gem_context_init(struct drm_device *dev);
@@ -2094,6 +2236,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
					  unsigned cache_level,
					  bool mappable,
					  bool nonblock);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
 
 /* i915_gem_stolen.c */
@@ -2133,6 +2276,11 @@ int i915_verify_lists(struct drm_device *dev);
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
 void i915_debugfs_cleanup(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+void intel_display_crc_init(struct drm_device *dev);
+#else
+static inline void intel_display_crc_init(struct drm_device *dev) {}
+#endif
 
 /* i915_gpu_error.c */
 __printf(2, 3)
@@ -2186,15 +2334,30 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
 extern void intel_i2c_reset(struct drm_device *dev);
 
 /* intel_opregion.c */
+struct intel_encoder;
 extern int intel_opregion_setup(struct drm_device *dev);
 #ifdef CONFIG_ACPI
 extern void intel_opregion_init(struct drm_device *dev);
 extern void intel_opregion_fini(struct drm_device *dev);
 extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+					 bool enable);
+extern int intel_opregion_notify_adapter(struct drm_device *dev,
+					 pci_power_t state);
 #else
 static inline void intel_opregion_init(struct drm_device *dev) { return; }
 static inline void intel_opregion_fini(struct drm_device *dev) { return; }
 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+	return 0;
+}
+static inline int
+intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+	return 0;
+}
 #endif
 
 /* intel_acpi.c */
@@ -2256,8 +2419,16 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
 void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
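The Valleyview sideband units declared above all follow the same accessor-pair shape, typically used in a read/modify/write sequence. A minimal userspace sketch of that idiom, with a fake register file standing in for the real sideband hardware (everything here is illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_cck[16];	/* stand-in for the real sideband unit */

static uint32_t vlv_cck_read(uint32_t reg)              { return fake_cck[reg]; }
static void     vlv_cck_write(uint32_t reg, uint32_t v) { fake_cck[reg] = v; }

int main(void)
{
	uint32_t val;

	/* Typical driver idiom: read, clear a field, program a new one, write back. */
	val = vlv_cck_read(3);
	val &= ~0xfu;		/* clear low divider field */
	val |= 0x5u;		/* program new divider */
	vlv_cck_write(3, val);

	printf("reg3 = 0x%x\n", vlv_cck_read(3));
	return 0;
}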
@@ -2266,37 +2437,21 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 int vlv_gpu_freq(int ddr_freq, int val);
 int vlv_freq_opcode(int ddr_freq, int val);
 
-#define __i915_read(x) \
-	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
-__i915_read(8)
-__i915_read(16)
-__i915_read(32)
-__i915_read(64)
-#undef __i915_read
-
-#define __i915_write(x) \
-	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
-__i915_write(8)
-__i915_write(16)
-__i915_write(32)
-__i915_write(64)
-#undef __i915_write
-
-#define I915_READ8(reg)		i915_read8(dev_priv, (reg), true)
-#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val), true)
-
-#define I915_READ16(reg)	i915_read16(dev_priv, (reg), true)
-#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val), true)
-#define I915_READ16_NOTRACE(reg)	i915_read16(dev_priv, (reg), false)
-#define I915_WRITE16_NOTRACE(reg, val)	i915_write16(dev_priv, (reg), (val), false)
-
-#define I915_READ(reg)		i915_read32(dev_priv, (reg), true)
-#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val), true)
-#define I915_READ_NOTRACE(reg)	i915_read32(dev_priv, (reg), false)
-#define I915_WRITE_NOTRACE(reg, val)	i915_write32(dev_priv, (reg), (val), false)
-
-#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val), true)
-#define I915_READ64(reg)	i915_read64(dev_priv, (reg), true)
+#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
+
+#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
+
+#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
+#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+
+#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
+#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
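With this change every I915_READ/I915_WRITE dispatches through a per-device table of function pointers, so a different backend (for example one that takes forcewake first) can be installed without touching any caller. A standalone sketch of the pattern, with invented names throughout:

#include <stdint.h>
#include <stdio.h>

struct uncore_funcs {
	uint32_t (*mmio_readl)(void *dev, uint32_t reg, int trace);
	void (*mmio_writel)(void *dev, uint32_t reg, uint32_t val, int trace);
};

struct fake_dev {
	struct uncore_funcs funcs;
	uint32_t regs[256];		/* stand-in for the MMIO BAR */
};

/* One possible backend; a driver installs a different table per GPU
 * generation without touching any READL/WRITEL call site. */
static uint32_t plain_readl(void *dev, uint32_t reg, int trace)
{
	(void)trace;
	return ((struct fake_dev *)dev)->regs[reg];
}

static void plain_writel(void *dev, uint32_t reg, uint32_t val, int trace)
{
	(void)trace;
	((struct fake_dev *)dev)->regs[reg] = val;
}

#define READL(dev, reg)       (dev)->funcs.mmio_readl((dev), (reg), 1)
#define WRITEL(dev, reg, val) (dev)->funcs.mmio_writel((dev), (reg), (val), 1)

int main(void)
{
	struct fake_dev dev = { { plain_readl, plain_writel }, { 0 } };

	WRITEL(&dev, 0x10, 0xdeadbeef);
	printf("0x%x\n", READL(&dev, 0x10));
	return 0;
}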
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cdfb9da0e4ce..12bbd5eac70d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -41,6 +41,9 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *o
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
 static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly);
+static __must_check int
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
@@ -61,8 +64,8 @@ static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
 static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
-static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -258,7 +261,7 @@ i915_gem_dumb_create(struct drm_file *file,
		     struct drm_mode_create_dumb *args)
 {
	/* have to work out size/pitch and return them */
-	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
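DIV_ROUND_UP(bpp, 8) replaces the open-coded (bpp + 7) / 8; both round bits up to whole bytes before the 64-byte pitch alignment. A small standalone check of the arithmetic, with the two kernel macros reimplemented locally so it compiles anywhere:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int width = 1366, bpp = 24, height = 768;
	unsigned int cpp = DIV_ROUND_UP(bpp, 8);	/* bytes per pixel, rounded up */
	unsigned int pitch = ALIGN(width * cpp, 64);	/* rows start 64-byte aligned */

	printf("cpp=%u pitch=%u size=%u\n", cpp, pitch, pitch * height);
	return 0;
}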
@@ -432,11 +435,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-		if (i915_gem_obj_bound_any(obj)) {
-			ret = i915_gem_object_set_to_gtt_domain(obj, false);
-			if (ret)
-				return ret;
-		}
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
	}
 
	ret = i915_gem_object_get_pages(obj);
@@ -748,11 +749,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
-		if (i915_gem_obj_bound_any(obj)) {
-			ret = i915_gem_object_set_to_gtt_domain(obj, true);
-			if (ret)
-				return ret;
-		}
+		ret = i915_gem_object_wait_rendering(obj, false);
+		if (ret)
+			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
@@ -966,12 +965,31 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
	ret = 0;
-	if (seqno == ring->outstanding_lazy_request)
+	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);
 
	return ret;
 }
 
+static void fake_irq(unsigned long data)
+{
+	wake_up_process((struct task_struct *)data);
+}
+
+static bool missed_irq(struct drm_i915_private *dev_priv,
+		       struct intel_ring_buffer *ring)
+{
+	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+}
+
+static bool can_wait_boost(struct drm_i915_file_private *file_priv)
+{
+	if (file_priv == NULL)
+		return true;
+
+	return !atomic_xchg(&file_priv->rps_wait_boost, true);
+}
+
 /**
  * __wait_seqno - wait until execution of seqno has finished
  * @ring: the ring expected to report seqno
@@ -992,13 +1010,14 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
  */
 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
-			bool interruptible, struct timespec *timeout)
+			bool interruptible,
+			struct timespec *timeout,
+			struct drm_i915_file_private *file_priv)
 {
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	struct timespec before, now, wait_time={1,0};
-	unsigned long timeout_jiffies;
-	long end;
-	bool wait_forever = true;
+	struct timespec before, now;
+	DEFINE_WAIT(wait);
+	long timeout_jiffies;
	int ret;
 
	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1006,51 +1025,79 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;
 
-	trace_i915_gem_request_wait_begin(ring, seqno);
+	timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
 
-	if (timeout != NULL) {
-		wait_time = *timeout;
-		wait_forever = false;
+	if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
+		gen6_rps_boost(dev_priv);
+		if (file_priv)
+			mod_delayed_work(dev_priv->wq,
+					 &file_priv->mm.idle_work,
+					 msecs_to_jiffies(100));
	}
 
-	timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
-
-	if (WARN_ON(!ring->irq_get(ring)))
+	if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
+	    WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;
 
-	/* Record current time in case interrupted by signal, or wedged * */
+	/* Record current time in case interrupted by signal, or wedged */
+	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
+	for (;;) {
+		struct timer_list timer;
+		unsigned long expire;
 
-#define EXIT_COND \
-	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
-	 i915_reset_in_progress(&dev_priv->gpu_error) || \
-	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-	do {
-		if (interruptible)
-			end = wait_event_interruptible_timeout(ring->irq_queue,
-							       EXIT_COND,
-							       timeout_jiffies);
-		else
-			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
						 timeout_jiffies);
+		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
 
		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
-		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-			end = -EAGAIN;
-
-		/* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
-		 * gone. */
-		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
-		if (ret)
-			end = ret;
-	} while (end == 0 && wait_forever);
+		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
+			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
+			 * is truely gone. */
+			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+			if (ret == 0)
+				ret = -EAGAIN;
+			break;
+		}
 
+		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+			ret = 0;
+			break;
+		}
+
+		if (interruptible && signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		if (timeout_jiffies <= 0) {
+			ret = -ETIME;
+			break;
+		}
+
+		timer.function = NULL;
+		if (timeout || missed_irq(dev_priv, ring)) {
+			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
+			expire = jiffies + (missed_irq(dev_priv, ring) ? 1 : timeout_jiffies);
+			mod_timer(&timer, expire);
+		}
+
+		io_schedule();
+
+		if (timeout)
+			timeout_jiffies = expire - jiffies;
+
+		if (timer.function) {
+			del_singleshot_timer_sync(&timer);
+			destroy_timer_on_stack(&timer);
+		}
+	}
	getrawmonotonic(&now);
+	trace_i915_gem_request_wait_end(ring, seqno);
 
	ring->irq_put(ring);
-	trace_i915_gem_request_wait_end(ring, seqno);
-#undef EXIT_COND
+
+	finish_wait(&ring->irq_queue, &wait);
 
	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
@@ -1059,17 +1106,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
		set_normalized_timespec(timeout, 0, 0);
	}
 
-	switch (end) {
-	case -EIO:
-	case -EAGAIN: /* Wedged */
-	case -ERESTARTSYS: /* Signal */
-		return (int)end;
-	case 0: /* Timeout */
-		return -ETIME;
-	default: /* Completed */
-		WARN_ON(end < 0); /* We're not aware of other errors */
-		return 0;
-	}
+	return ret;
 }
 
 /**
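The rewritten __wait_seqno() open-codes what wait_event_timeout() used to do: re-check the completion condition after every wakeup, bail out on signals and resets, and arm a timer only when a timeout was requested or an interrupt may have been missed. A loose userspace analogue of that loop using POSIX condition variables (all names local to the example; compile with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned int completed_seqno;

static void *producer(void *arg)
{
	(void)arg;
	usleep(100 * 1000);		/* pretend the GPU takes 100ms */
	pthread_mutex_lock(&lock);
	completed_seqno = 1;
	pthread_cond_broadcast(&cond);	/* the "irq" */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static int wait_seqno(unsigned int seqno, int timeout_ms)
{
	struct timespec expire;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &expire);
	expire.tv_sec += timeout_ms / 1000;
	expire.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (expire.tv_nsec >= 1000000000L) {
		expire.tv_sec++;
		expire.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	for (;;) {
		if (completed_seqno >= seqno)	/* condition re-checked per wakeup */
			break;
		ret = pthread_cond_timedwait(&cond, &lock, &expire);
		if (ret)			/* ETIMEDOUT plays the role of -ETIME */
			break;
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	printf("wait returned %d\n", wait_seqno(1, 1000));
	pthread_join(t, NULL);
	return 0;
}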
@@ -1097,7 +1134,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
 
	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
-			    interruptible, NULL);
+			    interruptible, NULL, NULL);
 }
 
 static int
@@ -1147,6 +1184,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
  */
 static __must_check int
 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+					    struct drm_file *file,
					    bool readonly)
 {
	struct drm_device *dev = obj->base.dev;
@@ -1173,7 +1211,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;
@@ -1222,7 +1260,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
-	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+	ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
	if (ret)
		goto unref;
 
@@ -1690,13 +1728,13 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
	return 0;
 }
 
-static long
+static unsigned long
 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
		  bool purgeable_only)
 {
	struct list_head still_bound_list;
	struct drm_i915_gem_object *obj, *next;
-	long count = 0;
+	unsigned long count = 0;
 
	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.unbound_list,
@@ -1762,13 +1800,13 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
	return count;
 }
 
-static long
+static unsigned long
 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 {
	return __i915_gem_shrink(dev_priv, target, true);
 }
 
-static long
+static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
	struct drm_i915_gem_object *obj, *next;
@@ -1778,9 +1816,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 
	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
				 global_list) {
-		if (obj->pages_pin_count == 0)
+		if (i915_gem_object_put_pages(obj) == 0)
			freed += obj->base.size >> PAGE_SHIFT;
-		i915_gem_object_put_pages(obj);
	}
	return freed;
 }
@@ -1865,6 +1902,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);
+
+		/* Check that the i965g/gm workaround works. */
+		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
 #ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
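The WARN_ON above encodes a unit check worth spelling out: 0x00100000 page frames of 4 KiB each is exactly the 4 GiB boundary that __GFP_DMA32 allocations must stay below. A trivial standalone verification:

#include <stdio.h>

int main(void)
{
	unsigned long long pfn_limit = 0x00100000ULL;	/* first pfn past the limit */
	unsigned long long page_size = 4096;

	printf("boundary = %llu GiB\n",
	       pfn_limit * page_size / (1024ULL * 1024 * 1024));	/* prints 4 */
	return 0;
}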
@@ -1918,7 +1958,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
	return 0;
 }
 
-void
+static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring)
 {
@@ -1957,6 +1997,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
	}
 }
 
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring)
+{
+	list_move_tail(&vma->mm_list, &vma->vm->active_list);
+	return i915_gem_object_move_to_active(vma->obj, ring);
+}
+
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
@@ -2078,11 +2125,10 @@ int __i915_add_request(struct intel_ring_buffer *ring,
	if (ret)
		return ret;
 
-	request = kmalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL)
+	request = ring->preallocated_lazy_request;
+	if (WARN_ON(request == NULL))
		return -ENOMEM;
 
-
	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
@@ -2091,17 +2137,13 @@ int __i915_add_request(struct intel_ring_buffer *ring,
	request_ring_position = intel_ring_get_tail(ring);
 
	ret = ring->add_request(ring);
-	if (ret) {
-		kfree(request);
+	if (ret)
		return ret;
-	}
 
	request->seqno = intel_ring_get_seqno(ring);
	request->ring = ring;
	request->head = request_start;
	request->tail = request_ring_position;
-	request->ctx = ring->last_context;
-	request->batch_obj = obj;
 
	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
@@ -2109,7 +2151,12 @@ int __i915_add_request(struct intel_ring_buffer *ring,
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
+	request->batch_obj = obj;
 
+	/* Hold a reference to the current context so that we can inspect
+	 * it later in case a hangcheck error event fires.
+	 */
+	request->ctx = ring->last_context;
	if (request->ctx)
		i915_gem_context_reference(request->ctx);
 
@@ -2129,12 +2176,14 @@ int __i915_add_request(struct intel_ring_buffer *ring,
	}
 
	trace_i915_gem_request_add(ring, request->seqno);
-	ring->outstanding_lazy_request = 0;
+	ring->outstanding_lazy_seqno = 0;
+	ring->preallocated_lazy_request = NULL;
 
	if (!dev_priv->ums.mm_suspended) {
		i915_queue_hangcheck(ring->dev);
 
		if (was_empty) {
+			cancel_delayed_work_sync(&dev_priv->mm.idle_work);
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work,
					   round_jiffies_up_relative(HZ));
@@ -2156,10 +2205,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
		return;
 
	spin_lock(&file_priv->mm.lock);
-	if (request->file_priv) {
-		list_del(&request->client_list);
-		request->file_priv = NULL;
-	}
+	list_del(&request->client_list);
+	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
 }
 
@@ -2224,6 +2271,21 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
	return false;
 }
 
+static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
+{
+	const unsigned long elapsed = get_seconds() - hs->guilty_ts;
+
+	if (hs->banned)
+		return true;
+
+	if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+		DRM_ERROR("context hanging too fast, declaring banned!\n");
+		return true;
+	}
+
+	return false;
+}
+
 static void i915_set_reset_status(struct intel_ring_buffer *ring,
				  struct drm_i915_gem_request *request,
				  u32 acthd)
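i915_context_is_banned() declares a context banned if it was already banned or if it hangs again within the ban period. The heuristic in isolation, with a stand-in value for DRM_I915_CTX_BAN_PERIOD:

#include <stdbool.h>
#include <stdio.h>

#define CTX_BAN_PERIOD 60	/* seconds; stand-in for the real constant */

struct hang_stats {
	bool banned;
	unsigned long guilty_ts;	/* time of the last guilty hang */
};

static bool context_is_banned(const struct hang_stats *hs, unsigned long now)
{
	return hs->banned || (now - hs->guilty_ts) <= CTX_BAN_PERIOD;
}

int main(void)
{
	struct hang_stats hs = { false, 1000 };

	printf("%d\n", context_is_banned(&hs, 1030));	/* 1: hanging too fast */
	printf("%d\n", context_is_banned(&hs, 2000));	/* 0: ban period elapsed */
	return 0;
}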
@@ -2260,10 +2322,13 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
	hs = &request->file_priv->hang_stats;
 
	if (hs) {
-		if (guilty)
+		if (guilty) {
+			hs->banned = i915_context_is_banned(hs);
			hs->batch_active++;
-		else
+			hs->guilty_ts = get_seconds();
+		} else {
			hs->batch_pending++;
+		}
	}
 }
 
@@ -2341,6 +2406,8 @@ void i915_gem_reset(struct drm_device *dev)
	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_lists(dev_priv, ring);
 
+	i915_gem_cleanup_ringbuffer(dev);
+
	i915_gem_restore_fences(dev);
 }
 
@@ -2405,57 +2472,53 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
	WARN_ON(i915_verify_lists(ring->dev));
 }
 
-void
+bool
 i915_gem_retire_requests(struct drm_device *dev)
 {
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
+	bool idle = true;
	int i;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_ring(ring, dev_priv, i) {
		i915_gem_retire_requests_ring(ring);
+		idle &= list_empty(&ring->request_list);
+	}
+
+	if (idle)
+		mod_delayed_work(dev_priv->wq,
+				 &dev_priv->mm.idle_work,
+				 msecs_to_jiffies(100));
+
+	return idle;
 }
 
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
-	drm_i915_private_t *dev_priv;
-	struct drm_device *dev;
-	struct intel_ring_buffer *ring;
-	bool idle;
-	int i;
-
-	dev_priv = container_of(work, drm_i915_private_t,
-				mm.retire_work.work);
-	dev = dev_priv->dev;
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), mm.retire_work.work);
+	struct drm_device *dev = dev_priv->dev;
+	bool idle;
 
	/* Come back later if the device is busy... */
-	if (!mutex_trylock(&dev->struct_mutex)) {
-		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
-				   round_jiffies_up_relative(HZ));
-		return;
+	idle = false;
+	if (mutex_trylock(&dev->struct_mutex)) {
+		idle = i915_gem_retire_requests(dev);
+		mutex_unlock(&dev->struct_mutex);
	}
-
-	i915_gem_retire_requests(dev);
-
-	/* Send a periodic flush down the ring so we don't hold onto GEM
-	 * objects indefinitely.
-	 */
-	idle = true;
-	for_each_ring(ring, dev_priv, i) {
-		if (ring->gpu_caches_dirty)
-			i915_add_request(ring, NULL);
-
-		idle &= list_empty(&ring->request_list);
-	}
-
-	if (!dev_priv->ums.mm_suspended && !idle)
+	if (!idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
-	if (idle)
-		intel_mark_idle(dev);
-
-	mutex_unlock(&dev->struct_mutex);
+}
+
+static void
+i915_gem_idle_work_handler(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), mm.idle_work.work);
+
+	intel_mark_idle(dev_priv->dev);
 }
 
 /**
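The retire handler now re-arms itself only while requests remain, and the separate idle handler runs once everything has drained. A toy standalone model of that rescheduling contract (no real workqueue, just the control flow):

#include <stdbool.h>
#include <stdio.h>

static int outstanding = 3;

static bool retire_requests(void)
{
	if (outstanding > 0)
		outstanding--;
	return outstanding == 0;	/* idle once the last request retires */
}

int main(void)
{
	bool idle = false;

	while (!idle) {			/* stands in for re-queued delayed work */
		idle = retire_requests();
		printf("retire pass, idle=%d\n", idle);
	}
	printf("idle work: mark device idle\n");
	return 0;
}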
@@ -2553,7 +2616,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
	if (timeout)
		args->timeout_ns = timespec_to_ns(timeout);
	return ret;
@@ -2600,6 +2663,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
	if (ret)
		return ret;
 
+	trace_i915_gem_ring_sync_to(from, to, seqno);
	ret = to->sync_to(to, from, seqno);
	if (!ret)
		/* We use last_read_seqno because sync_to()
@@ -2641,11 +2705,17 @@ int i915_vma_unbind(struct i915_vma *vma)
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
	int ret;
 
+	/* For now we only ever use 1 vma per object */
+	WARN_ON(!list_is_singular(&obj->vma_list));
+
	if (list_empty(&vma->vma_link))
		return 0;
 
-	if (!drm_mm_node_allocated(&vma->node))
-		goto destroy;
+	if (!drm_mm_node_allocated(&vma->node)) {
+		i915_gem_vma_destroy(vma);
+
+		return 0;
+	}
 
	if (obj->pin_count)
		return -EBUSY;
@@ -2685,13 +2755,10 @@ int i915_vma_unbind(struct i915_vma *vma)
 
	drm_mm_remove_node(&vma->node);
 
-destroy:
	i915_gem_vma_destroy(vma);
 
	/* Since the unbound list is global, only move to that list if
-	 * no more VMAs exist.
-	 * NB: Until we have real VMAs there will only ever be one */
-	WARN_ON(!list_empty(&obj->vma_list));
+	 * no more VMAs exist. */
	if (list_empty(&obj->vma_list))
		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
@@ -2887,6 +2954,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 obj->stride, obj->tiling_mode);
 
	switch (INTEL_INFO(dev)->gen) {
+	case 8:
	case 7:
	case 6:
	case 5:
@@ -3389,8 +3457,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
	/* And bump the LRU for this access */
	if (i915_gem_object_is_inactive(obj)) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
-							   &dev_priv->gtt.base);
+		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
		if (vma)
			list_move_tail(&vma->mm_list,
				       &dev_priv->gtt.base.inactive_list);
@@ -3761,7 +3828,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
	if (seqno == 0)
		return 0;
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -3865,6 +3932,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		goto out;
	}
 
+	if (obj->user_pin_count == ULONG_MAX) {
+		ret = -EBUSY;
+		goto out;
+	}
+
	if (obj->user_pin_count == 0) {
		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
		if (ret)
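The new ULONG_MAX guard keeps user_pin_count from wrapping back to zero, which would let an object be unpinned while still in use. The saturation idiom on its own:

#include <limits.h>
#include <stdio.h>

static int pin(unsigned long *user_pin_count)
{
	if (*user_pin_count == ULONG_MAX)
		return -1;		/* -EBUSY in the driver */
	(*user_pin_count)++;
	return 0;
}

int main(void)
{
	unsigned long count = ULONG_MAX - 1;

	printf("%d (count=%lu)\n", pin(&count), count);	/* ok, now saturated */
	printf("%d (count=%lu)\n", pin(&count), count);	/* refused, no wrap */
	return 0;
}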
@@ -4015,7 +4087,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 {
	INIT_LIST_HEAD(&obj->global_list);
	INIT_LIST_HEAD(&obj->ring_list);
-	INIT_LIST_HEAD(&obj->exec_list);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
 
@@ -4087,13 +4158,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
	return obj;
 }
 
-int i915_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-
-	return 0;
-}
-
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -4147,9 +4211,20 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
	i915_gem_object_free(obj);
 }
 
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
 {
+	struct i915_vma *vma;
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->vm == vm)
+			return vma;
+
+	return NULL;
+}
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+					      struct i915_address_space *vm)
+{
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);
@@ -4169,76 +4244,103 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
	return vma;
 }
 
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm)
+{
+	struct i915_vma *vma;
+
+	vma = i915_gem_obj_to_vma(obj, vm);
+	if (!vma)
+		vma = __i915_gem_vma_create(obj, vm);
+
+	return vma;
+}
+
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
	WARN_ON(vma->node.allocated);
+
+	/* Keep the vma as a placeholder in the execbuffer reservation lists */
+	if (!list_empty(&vma->exec_list))
+		return;
+
	list_del(&vma->vma_link);
+
	kfree(vma);
 }
 
 int
-i915_gem_idle(struct drm_device *dev)
+i915_gem_suspend(struct drm_device *dev)
 {
	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret = 0;
 
-	if (dev_priv->ums.mm_suspended) {
-		mutex_unlock(&dev->struct_mutex);
-		return 0;
-	}
+	mutex_lock(&dev->struct_mutex);
+	if (dev_priv->ums.mm_suspended)
+		goto err;
 
	ret = i915_gpu_idle(dev);
-	if (ret) {
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
+	if (ret)
+		goto err;
+
	i915_gem_retire_requests(dev);
 
	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_gem_evict_everything(dev);
 
-	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
-
	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);
 
-	/* Cancel the retire work handler, which should be idle now. */
+	/* Hack! Don't let anybody do execbuf while we don't control the chip.
+	 * We need to replace this with a semaphore, or something.
+	 * And not confound ums.mm_suspended!
+	 */
+	dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
+							     DRIVER_MODESET);
+	mutex_unlock(&dev->struct_mutex);
+
+	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+	cancel_delayed_work_sync(&dev_priv->mm.idle_work);
 
	return 0;
+
+err:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
 }
 
-void i915_gem_l3_remap(struct drm_device *dev)
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
 {
+	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 misccpctl;
-	int i;
-
-	if (!HAS_L3_GPU_CACHE(dev))
-		return;
+	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
+	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+	int i, ret;
 
-	if (!dev_priv->l3_parity.remap_info)
-		return;
+	if (!HAS_L3_DPF(dev) || !remap_info)
+		return 0;
 
-	misccpctl = I915_READ(GEN7_MISCCPCTL);
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-	POSTING_READ(GEN7_MISCCPCTL);
+	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+	if (ret)
+		return ret;
 
+	/*
+	 * Note: We do not worry about the concurrent register cacheline hang
+	 * here because no other code should access these registers other than
+	 * at initialization time.
+	 */
	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
-		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
-		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
-			DRM_DEBUG("0x%x was already programmed to %x\n",
-				  GEN7_L3LOG_BASE + i, remap);
-		if (remap && !dev_priv->l3_parity.remap_info[i/4])
-			DRM_DEBUG_DRIVER("Clearing remapped register\n");
-		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, reg_base + i);
+		intel_ring_emit(ring, remap_info[i/4]);
	}
 
-	/* Make sure all the writes land before disabling dop clock gating */
-	POSTING_READ(GEN7_L3LOG_BASE);
+	intel_ring_advance(ring);
 
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+	return ret;
 }
 
 void i915_gem_init_swizzling(struct drm_device *dev)
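i915_gem_l3_remap() now emits MI_LOAD_REGISTER_IMM commands so the render ring, not the CPU, writes the L3 log registers, three dwords per register. A standalone sketch of packing such triples into a command buffer; the opcode encoding below is a stand-in, not the real MI_LOAD_REGISTER_IMM value:

#include <stdint.h>
#include <stdio.h>

#define FAKE_LRI(n)	(0x22u << 23 | (2 * (n) - 1))	/* stand-in encoding */

int main(void)
{
	uint32_t ring[64];
	unsigned int tail = 0, i;
	uint32_t reg_base = 0xb000, remap_info[4] = { 1, 2, 3, 4 };

	for (i = 0; i < 4; i++) {		/* one LRI per register */
		ring[tail++] = FAKE_LRI(1);
		ring[tail++] = reg_base + 4 * i;	/* register offset */
		ring[tail++] = remap_info[i];		/* value to load */
	}

	printf("emitted %u dwords (%u per register)\n", tail, tail / 4);
	return 0;
}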
@@ -4260,6 +4362,8 @@ void i915_gem_init_swizzling(struct drm_device *dev)
4260 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); 4362 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4261 else if (IS_GEN7(dev)) 4363 else if (IS_GEN7(dev))
4262 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 4364 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4365 else if (IS_GEN8(dev))
4366 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4263 else 4367 else
4264 BUG(); 4368 BUG();
4265} 4369}
@@ -4330,7 +4434,7 @@ int
4330i915_gem_init_hw(struct drm_device *dev) 4434i915_gem_init_hw(struct drm_device *dev)
4331{ 4435{
4332 drm_i915_private_t *dev_priv = dev->dev_private; 4436 drm_i915_private_t *dev_priv = dev->dev_private;
4333 int ret; 4437 int ret, i;
4334 4438
4335 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) 4439 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4336 return -EIO; 4440 return -EIO;
@@ -4338,20 +4442,26 @@ i915_gem_init_hw(struct drm_device *dev)
4338 if (dev_priv->ellc_size) 4442 if (dev_priv->ellc_size)
4339 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); 4443 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4340 4444
4445 if (IS_HSW_GT3(dev))
4446 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
4447 else
4448 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
4449
4341 if (HAS_PCH_NOP(dev)) { 4450 if (HAS_PCH_NOP(dev)) {
4342 u32 temp = I915_READ(GEN7_MSG_CTL); 4451 u32 temp = I915_READ(GEN7_MSG_CTL);
4343 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); 4452 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4344 I915_WRITE(GEN7_MSG_CTL, temp); 4453 I915_WRITE(GEN7_MSG_CTL, temp);
4345 } 4454 }
4346 4455
4347 i915_gem_l3_remap(dev);
4348
4349 i915_gem_init_swizzling(dev); 4456 i915_gem_init_swizzling(dev);
4350 4457
4351 ret = i915_gem_init_rings(dev); 4458 ret = i915_gem_init_rings(dev);
4352 if (ret) 4459 if (ret)
4353 return ret; 4460 return ret;
4354 4461
4462 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4463 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4464
4355 /* 4465 /*
4356 * XXX: There was some w/a described somewhere suggesting loading 4466 * XXX: There was some w/a described somewhere suggesting loading
4357 * contexts before PPGTT. 4467 * contexts before PPGTT.
@@ -4454,26 +4564,12 @@ int
4454i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 4564i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4455 struct drm_file *file_priv) 4565 struct drm_file *file_priv)
4456{ 4566{
4457 struct drm_i915_private *dev_priv = dev->dev_private;
4458 int ret;
4459
4460 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4567 if (drm_core_check_feature(dev, DRIVER_MODESET))
4461 return 0; 4568 return 0;
4462 4569
4463 drm_irq_uninstall(dev); 4570 drm_irq_uninstall(dev);
4464 4571
4465 mutex_lock(&dev->struct_mutex); 4572 return i915_gem_suspend(dev);
4466 ret = i915_gem_idle(dev);
4467
4468 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4469 * We need to replace this with a semaphore, or something.
4470 * And not confound ums.mm_suspended!
4471 */
4472 if (ret != 0)
4473 dev_priv->ums.mm_suspended = 1;
4474 mutex_unlock(&dev->struct_mutex);
4475
4476 return ret;
4477} 4573}
4478 4574
4479void 4575void
@@ -4484,11 +4580,9 @@ i915_gem_lastclose(struct drm_device *dev)
4484 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4580 if (drm_core_check_feature(dev, DRIVER_MODESET))
4485 return; 4581 return;
4486 4582
4487 mutex_lock(&dev->struct_mutex); 4583 ret = i915_gem_suspend(dev);
4488 ret = i915_gem_idle(dev);
4489 if (ret) 4584 if (ret)
4490 DRM_ERROR("failed to idle hardware: %d\n", ret); 4585 DRM_ERROR("failed to idle hardware: %d\n", ret);
4491 mutex_unlock(&dev->struct_mutex);
4492} 4586}
4493 4587
4494static void 4588static void
@@ -4523,6 +4617,7 @@ i915_gem_load(struct drm_device *dev)
4523 INIT_LIST_HEAD(&dev_priv->vm_list); 4617 INIT_LIST_HEAD(&dev_priv->vm_list);
4524 i915_init_vm(dev_priv, &dev_priv->gtt.base); 4618 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4525 4619
4620 INIT_LIST_HEAD(&dev_priv->context_list);
4526 INIT_LIST_HEAD(&dev_priv->mm.unbound_list); 4621 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4527 INIT_LIST_HEAD(&dev_priv->mm.bound_list); 4622 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4528 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4623 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4532,6 +4627,8 @@ i915_gem_load(struct drm_device *dev)
4532 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 4627 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4533 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 4628 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4534 i915_gem_retire_work_handler); 4629 i915_gem_retire_work_handler);
4630 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4631 i915_gem_idle_work_handler);
4535 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 4632 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4536 4633
4537 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 4634 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
@@ -4582,7 +4679,7 @@ static int i915_gem_init_phys_object(struct drm_device *dev,
4582 if (dev_priv->mm.phys_objs[id - 1] || !size) 4679 if (dev_priv->mm.phys_objs[id - 1] || !size)
4583 return 0; 4680 return 0;
4584 4681
4585 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL); 4682 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
4586 if (!phys_obj) 4683 if (!phys_obj)
4587 return -ENOMEM; 4684 return -ENOMEM;
4588 4685
@@ -4756,6 +4853,8 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4756{ 4853{
4757 struct drm_i915_file_private *file_priv = file->driver_priv; 4854 struct drm_i915_file_private *file_priv = file->driver_priv;
4758 4855
4856 cancel_delayed_work_sync(&file_priv->mm.idle_work);
4857
4759 /* Clean up our request list when the client is going away, so that 4858 /* Clean up our request list when the client is going away, so that
4760 * later retire_requests won't dereference our soon-to-be-gone 4859 * later retire_requests won't dereference our soon-to-be-gone
4761 * file_priv. 4860 * file_priv.
@@ -4773,6 +4872,38 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4773 spin_unlock(&file_priv->mm.lock); 4872 spin_unlock(&file_priv->mm.lock);
4774} 4873}
4775 4874
4875static void
4876i915_gem_file_idle_work_handler(struct work_struct *work)
4877{
4878 struct drm_i915_file_private *file_priv =
4879 container_of(work, typeof(*file_priv), mm.idle_work.work);
4880
4881 atomic_set(&file_priv->rps_wait_boost, false);
4882}
4883
4884int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4885{
4886 struct drm_i915_file_private *file_priv;
4887
4888 DRM_DEBUG_DRIVER("\n");
4889
4890 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4891 if (!file_priv)
4892 return -ENOMEM;
4893
4894 file->driver_priv = file_priv;
4895 file_priv->dev_priv = dev->dev_private;
4896
4897 spin_lock_init(&file_priv->mm.lock);
4898 INIT_LIST_HEAD(&file_priv->mm.request_list);
4899 INIT_DELAYED_WORK(&file_priv->mm.idle_work,
4900 i915_gem_file_idle_work_handler);
4901
4902 idr_init(&file_priv->context_idr);
4903
4904 return 0;
4905}
4906
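
The new open/release pair relies on the standard embedded-work idiom: i915_gem_open() initialises the delayed work with INIT_DELAYED_WORK(), the handler recovers its owning file_priv with container_of(), and the cancel_delayed_work_sync() added to i915_gem_release() guarantees the handler has finished before file_priv is freed. A minimal userspace sketch of the container_of() step, with illustrative struct and field names rather than the driver's:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item { int pending; };

struct file_private {
	int rps_wait_boost;
	struct work_item idle_work;
};

static void idle_handler(struct work_item *work)
{
	/* Recover the enclosing struct from the embedded work item, as the
	 * real handler does via container_of(work, ..., mm.idle_work.work). */
	struct file_private *fpriv =
		container_of(work, struct file_private, idle_work);

	fpriv->rps_wait_boost = 0;
}

int main(void)
{
	struct file_private fpriv = { .rps_wait_boost = 1 };

	idle_handler(&fpriv.idle_work);
	printf("rps_wait_boost = %d\n", fpriv.rps_wait_boost);
	return 0;
}
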
4776static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) 4907static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4777{ 4908{
4778 if (!mutex_is_locked(mutex)) 4909 if (!mutex_is_locked(mutex))
@@ -4823,6 +4954,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
4823 4954
4824 if (unlock) 4955 if (unlock)
4825 mutex_unlock(&dev->struct_mutex); 4956 mutex_unlock(&dev->struct_mutex);
4957
4826 return count; 4958 return count;
4827} 4959}
4828 4960
@@ -4859,11 +4991,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4859 4991
4860bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) 4992bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4861{ 4993{
4862 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 4994 struct i915_vma *vma;
4863 struct i915_address_space *vm;
4864 4995
4865 list_for_each_entry(vm, &dev_priv->vm_list, global_link) 4996 list_for_each_entry(vma, &o->vma_list, vma_link)
4866 if (i915_gem_obj_bound(o, vm)) 4997 if (drm_mm_node_allocated(&vma->node))
4867 return true; 4998 return true;
4868 4999
4869 return false; 5000 return false;
@@ -4895,7 +5026,6 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
4895 struct drm_i915_private, 5026 struct drm_i915_private,
4896 mm.inactive_shrinker); 5027 mm.inactive_shrinker);
4897 struct drm_device *dev = dev_priv->dev; 5028 struct drm_device *dev = dev_priv->dev;
4898 int nr_to_scan = sc->nr_to_scan;
4899 unsigned long freed; 5029 unsigned long freed;
4900 bool unlock = true; 5030 bool unlock = true;
4901 5031
@@ -4909,38 +5039,30 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
4909 unlock = false; 5039 unlock = false;
4910 } 5040 }
4911 5041
4912 freed = i915_gem_purge(dev_priv, nr_to_scan); 5042 freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
4913 if (freed < nr_to_scan) 5043 if (freed < sc->nr_to_scan)
4914 freed += __i915_gem_shrink(dev_priv, nr_to_scan, 5044 freed += __i915_gem_shrink(dev_priv,
4915 false); 5045 sc->nr_to_scan - freed,
4916 if (freed < nr_to_scan) 5046 false);
5047 if (freed < sc->nr_to_scan)
4917 freed += i915_gem_shrink_all(dev_priv); 5048 freed += i915_gem_shrink_all(dev_priv);
4918 5049
4919 if (unlock) 5050 if (unlock)
4920 mutex_unlock(&dev->struct_mutex); 5051 mutex_unlock(&dev->struct_mutex);
5052
4921 return freed; 5053 return freed;
4922} 5054}
4923 5055
4924struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, 5056struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
4925 struct i915_address_space *vm)
4926{ 5057{
4927 struct i915_vma *vma; 5058 struct i915_vma *vma;
4928 list_for_each_entry(vma, &obj->vma_list, vma_link)
4929 if (vma->vm == vm)
4930 return vma;
4931 5059
4932 return NULL; 5060 if (WARN_ON(list_empty(&obj->vma_list)))
4933} 5061 return NULL;
4934
4935struct i915_vma *
4936i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4937 struct i915_address_space *vm)
4938{
4939 struct i915_vma *vma;
4940 5062
4941 vma = i915_gem_obj_to_vma(obj, vm); 5063 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
4942 if (!vma) 5064 if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
4943 vma = i915_gem_vma_create(obj, vm); 5065 return NULL;
4944 5066
4945 return vma; 5067 return vma;
4946} 5068}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 403309c2a7d6..72a3df32292f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -73,7 +73,7 @@
73 * 73 *
74 * There are two confusing terms used above: 74 * There are two confusing terms used above:
75 * The "current context" means the context which is currently running on the 75 * The "current context" means the context which is currently running on the
76 * GPU. The GPU has loaded it's state already and has stored away the gtt 76 * GPU. The GPU has loaded its state already and has stored away the gtt
77 * offset of the BO. The GPU is not actively referencing the data at this 77 * offset of the BO. The GPU is not actively referencing the data at this
78 * offset, but it will on the next context switch. The only way to avoid this 78 * offset, but it will on the next context switch. The only way to avoid this
79 * is to do a GPU reset. 79 * is to do a GPU reset.
@@ -117,6 +117,9 @@ static int get_context_size(struct drm_device *dev)
117 else 117 else
118 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; 118 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
119 break; 119 break;
120 case 8:
121 ret = GEN8_CXT_TOTAL_SIZE;
122 break;
120 default: 123 default:
121 BUG(); 124 BUG();
122 } 125 }
@@ -129,6 +132,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
129 struct i915_hw_context *ctx = container_of(ctx_ref, 132 struct i915_hw_context *ctx = container_of(ctx_ref,
130 typeof(*ctx), ref); 133 typeof(*ctx), ref);
131 134
135 list_del(&ctx->link);
132 drm_gem_object_unreference(&ctx->obj->base); 136 drm_gem_object_unreference(&ctx->obj->base);
133 kfree(ctx); 137 kfree(ctx);
134} 138}
@@ -147,6 +151,7 @@ create_hw_context(struct drm_device *dev,
147 151
148 kref_init(&ctx->ref); 152 kref_init(&ctx->ref);
149 ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size); 153 ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
154 INIT_LIST_HEAD(&ctx->link);
150 if (ctx->obj == NULL) { 155 if (ctx->obj == NULL) {
151 kfree(ctx); 156 kfree(ctx);
152 DRM_DEBUG_DRIVER("Context object allocation failed\n"); 157 DRM_DEBUG_DRIVER("Context object allocation failed\n");
@@ -166,6 +171,7 @@ create_hw_context(struct drm_device *dev,
166 * assertion in the context switch code. 171 * assertion in the context switch code.
167 */ 172 */
168 ctx->ring = &dev_priv->ring[RCS]; 173 ctx->ring = &dev_priv->ring[RCS];
174 list_add_tail(&ctx->link, &dev_priv->context_list);
169 175
170 /* Default context will never have a file_priv */ 176 /* Default context will never have a file_priv */
171 if (file_priv == NULL) 177 if (file_priv == NULL)
@@ -178,6 +184,10 @@ create_hw_context(struct drm_device *dev,
178 184
179 ctx->file_priv = file_priv; 185 ctx->file_priv = file_priv;
180 ctx->id = ret; 186 ctx->id = ret;
187 /* NB: Mark all slices as needing a remap so that when the context first
188 * loads it will restore whatever remap state already exists. If there
189 * is no remap info, it will be a NOP. */
190 ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
181 191
182 return ctx; 192 return ctx;
183 193
@@ -213,7 +223,6 @@ static int create_default_context(struct drm_i915_private *dev_priv)
213 * may not be available. To avoid this we always pin the 223 * may not be available. To avoid this we always pin the
214 * default context. 224 * default context.
215 */ 225 */
216 dev_priv->ring[RCS].default_context = ctx;
217 ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false); 226 ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
218 if (ret) { 227 if (ret) {
219 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); 228 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -226,6 +235,8 @@ static int create_default_context(struct drm_i915_private *dev_priv)
226 goto err_unpin; 235 goto err_unpin;
227 } 236 }
228 237
238 dev_priv->ring[RCS].default_context = ctx;
239
229 DRM_DEBUG_DRIVER("Default HW context loaded\n"); 240 DRM_DEBUG_DRIVER("Default HW context loaded\n");
230 return 0; 241 return 0;
231 242
@@ -281,16 +292,24 @@ void i915_gem_context_fini(struct drm_device *dev)
281 * other code, leading to spurious errors. */ 292 * other code, leading to spurious errors. */
282 intel_gpu_reset(dev); 293 intel_gpu_reset(dev);
283 294
284 i915_gem_object_unpin(dctx->obj);
285
286 /* When default context is created and switched to, base object refcount 295 /* When default context is created and switched to, base object refcount
287 * will be 2 (+1 from object creation and +1 from do_switch()). 296 * will be 2 (+1 from object creation and +1 from do_switch()).
288 * i915_gem_context_fini() will be called after gpu_idle() has switched 297 * i915_gem_context_fini() will be called after gpu_idle() has switched
289 * to default context. So we need to unreference the base object once 298 * to default context. So we need to unreference the base object once
290 * to offset the do_switch part, so that i915_gem_context_unreference() 299 * to offset the do_switch part, so that i915_gem_context_unreference()
291 * can then free the base object correctly. */ 300 * can then free the base object correctly. */
292 drm_gem_object_unreference(&dctx->obj->base); 301 WARN_ON(!dev_priv->ring[RCS].last_context);
302 if (dev_priv->ring[RCS].last_context == dctx) {
303 /* Fake switch to NULL context */
304 WARN_ON(dctx->obj->active);
305 i915_gem_object_unpin(dctx->obj);
306 i915_gem_context_unreference(dctx);
307 }
308
309 i915_gem_object_unpin(dctx->obj);
293 i915_gem_context_unreference(dctx); 310 i915_gem_context_unreference(dctx);
311 dev_priv->ring[RCS].default_context = NULL;
312 dev_priv->ring[RCS].last_context = NULL;
294} 313}
295 314
296static int context_idr_cleanup(int id, void *p, void *data) 315static int context_idr_cleanup(int id, void *p, void *data)
@@ -393,11 +412,11 @@ static int do_switch(struct i915_hw_context *to)
393 struct intel_ring_buffer *ring = to->ring; 412 struct intel_ring_buffer *ring = to->ring;
394 struct i915_hw_context *from = ring->last_context; 413 struct i915_hw_context *from = ring->last_context;
395 u32 hw_flags = 0; 414 u32 hw_flags = 0;
396 int ret; 415 int ret, i;
397 416
398 BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0); 417 BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
399 418
400 if (from == to) 419 if (from == to && !to->remap_slice)
401 return 0; 420 return 0;
402 421
403 ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false); 422 ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
@@ -420,8 +439,6 @@ static int do_switch(struct i915_hw_context *to)
420 439
421 if (!to->is_initialized || is_default_context(to)) 440 if (!to->is_initialized || is_default_context(to))
422 hw_flags |= MI_RESTORE_INHIBIT; 441 hw_flags |= MI_RESTORE_INHIBIT;
423 else if (WARN_ON_ONCE(from == to)) /* not yet expected */
424 hw_flags |= MI_FORCE_RESTORE;
425 442
426 ret = mi_set_context(ring, to, hw_flags); 443 ret = mi_set_context(ring, to, hw_flags);
427 if (ret) { 444 if (ret) {
@@ -429,6 +446,18 @@ static int do_switch(struct i915_hw_context *to)
429 return ret; 446 return ret;
430 } 447 }
431 448
449 for (i = 0; i < MAX_L3_SLICES; i++) {
450 if (!(to->remap_slice & (1<<i)))
451 continue;
452
453 ret = i915_gem_l3_remap(ring, i);
454 /* If it failed, try again next round */
455 if (ret)
456 DRM_DEBUG_DRIVER("L3 remapping failed\n");
457 else
458 to->remap_slice &= ~(1<<i);
459 }
460
432 /* The backing object for the context is done after switching to the 461 /* The backing object for the context is done after switching to the
433 * *next* context. Therefore we cannot retire the previous context until 462 * *next* context. Therefore we cannot retire the previous context until
434 * the next context has already started running. In fact, the below code 463 * the next context has already started running. In fact, the below code
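
The remap loop added to do_switch() above treats remap_slice as a to-do bitmask: create_hw_context() sets every slice bit up front, a successful i915_gem_l3_remap() clears that slice's bit, and a failure simply leaves the bit set to be retried on the next context switch. A standalone model of that bookkeeping, with invented names:

#include <stdio.h>

#define NUM_SLICES 2

static int remap_slice_hw(int slice)
{
	return slice == 1 ? -1 : 0;	/* pretend slice 1 fails once */
}

int main(void)
{
	unsigned remap_slice = (1 << NUM_SLICES) - 1;	/* all need remap */
	int i;

	for (i = 0; i < NUM_SLICES; i++) {
		if (!(remap_slice & (1 << i)))
			continue;
		if (remap_slice_hw(i) == 0)
			remap_slice &= ~(1 << i);	/* done for good */
		/* else: bit stays set, retried on the next switch */
	}
	printf("still pending: %#x\n", remap_slice);
	return 0;
}
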
@@ -436,11 +465,8 @@ static int do_switch(struct i915_hw_context *to)
436 * MI_SET_CONTEXT instead of when the next seqno has completed. 465 * MI_SET_CONTEXT instead of when the next seqno has completed.
437 */ 466 */
438 if (from != NULL) { 467 if (from != NULL) {
439 struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
440 struct i915_address_space *ggtt = &dev_priv->gtt.base;
441 from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; 468 from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
442 list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list); 469 i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
443 i915_gem_object_move_to_active(from->obj, ring);
444 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the 470 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
445 * whole damn pipeline, we don't need to explicitly mark the 471 * whole damn pipeline, we don't need to explicitly mark the
446 * object dirty. The only exception is that the context must be 472 * object dirty. The only exception is that the context must be
@@ -451,17 +477,7 @@ static int do_switch(struct i915_hw_context *to)
451 from->obj->dirty = 1; 477 from->obj->dirty = 1;
452 BUG_ON(from->obj->ring != ring); 478 BUG_ON(from->obj->ring != ring);
453 479
454 ret = i915_add_request(ring, NULL); 480 /* obj is kept alive until the next request by its active ref */
455 if (ret) {
456 /* Too late, we've already scheduled a context switch.
457 * Try to undo the change so that the hw state is
458 * consistent with our tracking. In case of emergency,
459 * scream.
460 */
461 WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
462 return ret;
463 }
464
465 i915_gem_object_unpin(from->obj); 481 i915_gem_object_unpin(from->obj);
466 i915_gem_context_unreference(from); 482 i915_gem_context_unreference(from);
467 } 483 }
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 91b700155850..b7376533633d 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -37,6 +37,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
37 if (vma->obj->pin_count) 37 if (vma->obj->pin_count)
38 return false; 38 return false;
39 39
40 if (WARN_ON(!list_empty(&vma->exec_list)))
41 return false;
42
40 list_add(&vma->exec_list, unwind); 43 list_add(&vma->exec_list, unwind);
41 return drm_mm_scan_add_block(&vma->node); 44 return drm_mm_scan_add_block(&vma->node);
42} 45}
@@ -113,7 +116,7 @@ none:
113 } 116 }
114 117
115 /* We expect the caller to unpin, evict all and try again, or give up. 118 /* We expect the caller to unpin, evict all and try again, or give up.
116 * So calling i915_gem_evict_everything() is unnecessary. 119 * So calling i915_gem_evict_vm() is unnecessary.
117 */ 120 */
118 return -ENOSPC; 121 return -ENOSPC;
119 122
@@ -152,12 +155,48 @@ found:
152 return ret; 155 return ret;
153} 156}
154 157
158/**
159 * i915_gem_evict_vm - Try to free up VM space
160 *
161 * @vm: Address space to evict from
162 * @do_idle: Boolean directing whether to idle first.
163 *
164 * VM eviction is about freeing up virtual address space. If one wants
165 * fine-grained eviction, see i915_gem_evict_something() for more details. In
166 * terms of freeing up actual system memory, this function may not accomplish
167 * the desired result. An object may be shared in multiple address spaces, and
168 * this function will not assert that those objects be freed.
169 *
170 * Using do_idle will result in a more complete eviction because it retires and
171 * inactivates current BOs.
172 */
173int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
174{
175 struct i915_vma *vma, *next;
176 int ret;
177
178 trace_i915_gem_evict_vm(vm);
179
180 if (do_idle) {
181 ret = i915_gpu_idle(vm->dev);
182 if (ret)
183 return ret;
184
185 i915_gem_retire_requests(vm->dev);
186 }
187
188 list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
189 if (vma->obj->pin_count == 0)
190 WARN_ON(i915_vma_unbind(vma));
191
192 return 0;
193}
194
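
i915_gem_evict_vm() slots into the reservation path as a single-shot fallback: with do_idle the GPU is idled and requests retired so active VMAs migrate to the inactive list, after which every unpinned VMA is unbound. A standalone model of the retry pattern the execbuffer reservation code adopts further down, with all names invented for the illustration:

#include <stdio.h>

#define ENOSPC 28

static int space_left = 0;	/* pretend the VM starts full */

static int try_to_reserve(void)
{
	return space_left > 0 ? 0 : -ENOSPC;
}

static void evict_vm(void)
{
	space_left = 1;		/* idling + unbinding freed the VM */
}

int main(void)
{
	int ret = try_to_reserve();

	if (ret == -ENOSPC) {
		evict_vm();	/* i915_gem_evict_vm(vm, true) analogue */
		ret = try_to_reserve();
	}
	printf("reserve %s\n", ret ? "failed" : "succeeded");
	return 0;
}
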
155int 195int
156i915_gem_evict_everything(struct drm_device *dev) 196i915_gem_evict_everything(struct drm_device *dev)
157{ 197{
158 drm_i915_private_t *dev_priv = dev->dev_private; 198 drm_i915_private_t *dev_priv = dev->dev_private;
159 struct i915_address_space *vm; 199 struct i915_address_space *vm;
160 struct i915_vma *vma, *next;
161 bool lists_empty = true; 200 bool lists_empty = true;
162 int ret; 201 int ret;
163 202
@@ -184,11 +223,8 @@ i915_gem_evict_everything(struct drm_device *dev)
184 i915_gem_retire_requests(dev); 223 i915_gem_retire_requests(dev);
185 224
186 /* Having flushed everything, unbind() should never raise an error */ 225 /* Having flushed everything, unbind() should never raise an error */
187 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 226 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
188 list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) 227 WARN_ON(i915_gem_evict_vm(vm, false));
189 if (vma->obj->pin_count == 0)
190 WARN_ON(i915_vma_unbind(vma));
191 }
192 228
193 return 0; 229 return 0;
194} 230}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index bf345777ae9f..885d595e0e02 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,35 +33,35 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include <linux/dma_remapping.h> 34#include <linux/dma_remapping.h>
35 35
36struct eb_objects { 36struct eb_vmas {
37 struct list_head objects; 37 struct list_head vmas;
38 int and; 38 int and;
39 union { 39 union {
40 struct drm_i915_gem_object *lut[0]; 40 struct i915_vma *lut[0];
41 struct hlist_head buckets[0]; 41 struct hlist_head buckets[0];
42 }; 42 };
43}; 43};
44 44
45static struct eb_objects * 45static struct eb_vmas *
46eb_create(struct drm_i915_gem_execbuffer2 *args) 46eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
47{ 47{
48 struct eb_objects *eb = NULL; 48 struct eb_vmas *eb = NULL;
49 49
50 if (args->flags & I915_EXEC_HANDLE_LUT) { 50 if (args->flags & I915_EXEC_HANDLE_LUT) {
51 int size = args->buffer_count; 51 unsigned size = args->buffer_count;
52 size *= sizeof(struct drm_i915_gem_object *); 52 size *= sizeof(struct i915_vma *);
53 size += sizeof(struct eb_objects); 53 size += sizeof(struct eb_vmas);
54 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 54 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
55 } 55 }
56 56
57 if (eb == NULL) { 57 if (eb == NULL) {
58 int size = args->buffer_count; 58 unsigned size = args->buffer_count;
59 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; 59 unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
60 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head)); 60 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
61 while (count > 2*size) 61 while (count > 2*size)
62 count >>= 1; 62 count >>= 1;
63 eb = kzalloc(count*sizeof(struct hlist_head) + 63 eb = kzalloc(count*sizeof(struct hlist_head) +
64 sizeof(struct eb_objects), 64 sizeof(struct eb_vmas),
65 GFP_TEMPORARY); 65 GFP_TEMPORARY);
66 if (eb == NULL) 66 if (eb == NULL)
67 return eb; 67 return eb;
@@ -70,64 +70,102 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
70 } else 70 } else
71 eb->and = -args->buffer_count; 71 eb->and = -args->buffer_count;
72 72
73 INIT_LIST_HEAD(&eb->objects); 73 INIT_LIST_HEAD(&eb->vmas);
74 return eb; 74 return eb;
75} 75}
76 76
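
The sizing loop in eb_create() starts from as many hash buckets as fit in one page of hlist_heads and halves the count until there are at most two buckets per object, keeping it a power of two so the bucket index can be computed with a mask (the eb->and assignment itself falls outside the quoted hunk). A standalone check of that arithmetic, assuming 4K pages and 8-byte list heads purely for the example:

#include <stdio.h>

int main(void)
{
	unsigned page_size = 4096, head_size = 8;
	unsigned buffer_count = 40;	/* objects in the execbuf */
	unsigned count = page_size / head_size / 2;

	while (count > 2 * buffer_count)
		count >>= 1;

	printf("buckets=%u mask=%#x\n", count, count - 1);
	return 0;
}
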
77static void 77static void
78eb_reset(struct eb_objects *eb) 78eb_reset(struct eb_vmas *eb)
79{ 79{
80 if (eb->and >= 0) 80 if (eb->and >= 0)
81 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); 81 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
82} 82}
83 83
84static int 84static int
85eb_lookup_objects(struct eb_objects *eb, 85eb_lookup_vmas(struct eb_vmas *eb,
86 struct drm_i915_gem_exec_object2 *exec, 86 struct drm_i915_gem_exec_object2 *exec,
87 const struct drm_i915_gem_execbuffer2 *args, 87 const struct drm_i915_gem_execbuffer2 *args,
88 struct drm_file *file) 88 struct i915_address_space *vm,
89 struct drm_file *file)
89{ 90{
90 int i; 91 struct drm_i915_gem_object *obj;
92 struct list_head objects;
93 int i, ret = 0;
91 94
95 INIT_LIST_HEAD(&objects);
92 spin_lock(&file->table_lock); 96 spin_lock(&file->table_lock);
97 /* Grab a reference to the object and release the lock so we can look up
98 * or create the VMA without using GFP_ATOMIC */
93 for (i = 0; i < args->buffer_count; i++) { 99 for (i = 0; i < args->buffer_count; i++) {
94 struct drm_i915_gem_object *obj;
95
96 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle)); 100 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
97 if (obj == NULL) { 101 if (obj == NULL) {
98 spin_unlock(&file->table_lock); 102 spin_unlock(&file->table_lock);
99 DRM_DEBUG("Invalid object handle %d at index %d\n", 103 DRM_DEBUG("Invalid object handle %d at index %d\n",
100 exec[i].handle, i); 104 exec[i].handle, i);
101 return -ENOENT; 105 ret = -ENOENT;
106 goto out;
102 } 107 }
103 108
104 if (!list_empty(&obj->exec_list)) { 109 if (!list_empty(&obj->obj_exec_link)) {
105 spin_unlock(&file->table_lock); 110 spin_unlock(&file->table_lock);
106 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", 111 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
107 obj, exec[i].handle, i); 112 obj, exec[i].handle, i);
108 return -EINVAL; 113 ret = -EINVAL;
114 goto out;
109 } 115 }
110 116
111 drm_gem_object_reference(&obj->base); 117 drm_gem_object_reference(&obj->base);
112 list_add_tail(&obj->exec_list, &eb->objects); 118 list_add_tail(&obj->obj_exec_link, &objects);
119 }
120 spin_unlock(&file->table_lock);
113 121
114 obj->exec_entry = &exec[i]; 122 i = 0;
123 list_for_each_entry(obj, &objects, obj_exec_link) {
124 struct i915_vma *vma;
125
126 /*
127 * NOTE: We can leak any vmas created here when something fails
128 * later on. But that's no issue since vma_unbind can deal with
129 * vmas which are not actually bound. And since only
130 * lookup_or_create exists as an interface to get at the vma
131 * from the (obj, vm) pair, we don't run the risk of creating
132 * duplicated vmas for the same vm.
133 */
134 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
135 if (IS_ERR(vma)) {
136 DRM_DEBUG("Failed to look up VMA\n");
137 ret = PTR_ERR(vma);
138 goto out;
139 }
140
141 list_add_tail(&vma->exec_list, &eb->vmas);
142
143 vma->exec_entry = &exec[i];
115 if (eb->and < 0) { 144 if (eb->and < 0) {
116 eb->lut[i] = obj; 145 eb->lut[i] = vma;
117 } else { 146 } else {
118 uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle; 147 uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
119 obj->exec_handle = handle; 148 vma->exec_handle = handle;
120 hlist_add_head(&obj->exec_node, 149 hlist_add_head(&vma->exec_node,
121 &eb->buckets[handle & eb->and]); 150 &eb->buckets[handle & eb->and]);
122 } 151 }
152 ++i;
123 } 153 }
124 spin_unlock(&file->table_lock);
125 154
126 return 0; 155
156out:
157 while (!list_empty(&objects)) {
158 obj = list_first_entry(&objects,
159 struct drm_i915_gem_object,
160 obj_exec_link);
161 list_del_init(&obj->obj_exec_link);
162 if (ret)
163 drm_gem_object_unreference(&obj->base);
164 }
165 return ret;
127} 166}
128 167
129static struct drm_i915_gem_object * 168static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
130eb_get_object(struct eb_objects *eb, unsigned long handle)
131{ 169{
132 if (eb->and < 0) { 170 if (eb->and < 0) {
133 if (handle >= -eb->and) 171 if (handle >= -eb->and)
@@ -139,34 +177,33 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
139 177
140 head = &eb->buckets[handle & eb->and]; 178 head = &eb->buckets[handle & eb->and];
141 hlist_for_each(node, head) { 179 hlist_for_each(node, head) {
142 struct drm_i915_gem_object *obj; 180 struct i915_vma *vma;
143 181
144 obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); 182 vma = hlist_entry(node, struct i915_vma, exec_node);
145 if (obj->exec_handle == handle) 183 if (vma->exec_handle == handle)
146 return obj; 184 return vma;
147 } 185 }
148 return NULL; 186 return NULL;
149 } 187 }
150} 188}
151 189
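
eb_get_vma() mirrors the two storage modes set up in eb_create(): a negative 'and' means handles are dense indices straight into the flat LUT (the I915_EXEC_HANDLE_LUT case), while a non-negative 'and' is the mask of the power-of-two hash table. A small sketch of both lookups:

#include <stdio.h>

int main(void)
{
	int and_mask = -4;		/* LUT mode: handles 0..3 are valid */
	unsigned long handle = 2;

	if (and_mask < 0 && handle < (unsigned long)-and_mask)
		printf("LUT slot %lu\n", handle);

	and_mask = 7;			/* hash mode: 8 buckets */
	handle = 0x1234;
	printf("hash bucket %lu\n", handle & and_mask);
	return 0;
}
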
152static void 190static void eb_destroy(struct eb_vmas *eb) {
153eb_destroy(struct eb_objects *eb) 191 while (!list_empty(&eb->vmas)) {
154{ 192 struct i915_vma *vma;
155 while (!list_empty(&eb->objects)) {
156 struct drm_i915_gem_object *obj;
157 193
158 obj = list_first_entry(&eb->objects, 194 vma = list_first_entry(&eb->vmas,
159 struct drm_i915_gem_object, 195 struct i915_vma,
160 exec_list); 196 exec_list);
161 list_del_init(&obj->exec_list); 197 list_del_init(&vma->exec_list);
162 drm_gem_object_unreference(&obj->base); 198 drm_gem_object_unreference(&vma->obj->base);
163 } 199 }
164 kfree(eb); 200 kfree(eb);
165} 201}
166 202
167static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) 203static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
168{ 204{
169 return (obj->base.write_domain == I915_GEM_DOMAIN_CPU || 205 return (HAS_LLC(obj->base.dev) ||
206 obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
170 !obj->map_and_fenceable || 207 !obj->map_and_fenceable ||
171 obj->cache_level != I915_CACHE_NONE); 208 obj->cache_level != I915_CACHE_NONE);
172} 209}
@@ -175,17 +212,31 @@ static int
175relocate_entry_cpu(struct drm_i915_gem_object *obj, 212relocate_entry_cpu(struct drm_i915_gem_object *obj,
176 struct drm_i915_gem_relocation_entry *reloc) 213 struct drm_i915_gem_relocation_entry *reloc)
177{ 214{
215 struct drm_device *dev = obj->base.dev;
178 uint32_t page_offset = offset_in_page(reloc->offset); 216 uint32_t page_offset = offset_in_page(reloc->offset);
179 char *vaddr; 217 char *vaddr;
180 int ret = -EINVAL; 218 int ret = -EINVAL;
181 219
182 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 220 ret = i915_gem_object_set_to_cpu_domain(obj, true);
183 if (ret) 221 if (ret)
184 return ret; 222 return ret;
185 223
186 vaddr = kmap_atomic(i915_gem_object_get_page(obj, 224 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
187 reloc->offset >> PAGE_SHIFT)); 225 reloc->offset >> PAGE_SHIFT));
188 *(uint32_t *)(vaddr + page_offset) = reloc->delta; 226 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
227
228 if (INTEL_INFO(dev)->gen >= 8) {
229 page_offset = offset_in_page(page_offset + sizeof(uint32_t));
230
231 if (page_offset == 0) {
232 kunmap_atomic(vaddr);
233 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
234 (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
235 }
236
237 *(uint32_t *)(vaddr + page_offset) = 0;
238 }
239
189 kunmap_atomic(vaddr); 240 kunmap_atomic(vaddr);
190 241
191 return 0; 242 return 0;
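
On gen >= 8 a relocation is a 64-bit quantity, so relocate_entry_cpu() writes a second dword (the upper 32 bits, written as 0 here since reloc->delta is 32-bit) and must switch kmap pages when that dword is the first word of the following page. A userspace model of the page-crossing test, assuming 4K pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define offset_in_page(off) ((uint32_t)((off) & (PAGE_SIZE - 1)))

int main(void)
{
	uint64_t reloc_offset = PAGE_SIZE - 4;	/* last dword of page 0 */
	uint32_t lo = offset_in_page(reloc_offset);
	uint32_t hi = offset_in_page(lo + sizeof(uint32_t));

	printf("low dword in page %llu at offset %u\n",
	       (unsigned long long)(reloc_offset / PAGE_SIZE), lo);
	if (hi == 0)
		printf("high dword crosses into page %llu -> remap\n",
		       (unsigned long long)(reloc_offset / PAGE_SIZE + 1));
	return 0;
}
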
@@ -216,6 +267,21 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
216 reloc_entry = (uint32_t __iomem *) 267 reloc_entry = (uint32_t __iomem *)
217 (reloc_page + offset_in_page(reloc->offset)); 268 (reloc_page + offset_in_page(reloc->offset));
218 iowrite32(reloc->delta, reloc_entry); 269 iowrite32(reloc->delta, reloc_entry);
270
271 if (INTEL_INFO(dev)->gen >= 8) {
272 reloc_entry += 1;
273
274 if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
275 io_mapping_unmap_atomic(reloc_page);
276 reloc_page = io_mapping_map_atomic_wc(
277 dev_priv->gtt.mappable,
278 reloc->offset + sizeof(uint32_t));
279 reloc_entry = reloc_page;
280 }
281
282 iowrite32(0, reloc_entry);
283 }
284
219 io_mapping_unmap_atomic(reloc_page); 285 io_mapping_unmap_atomic(reloc_page);
220 286
221 return 0; 287 return 0;
@@ -223,22 +289,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
223 289
224static int 290static int
225i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 291i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
226 struct eb_objects *eb, 292 struct eb_vmas *eb,
227 struct drm_i915_gem_relocation_entry *reloc, 293 struct drm_i915_gem_relocation_entry *reloc,
228 struct i915_address_space *vm) 294 struct i915_address_space *vm)
229{ 295{
230 struct drm_device *dev = obj->base.dev; 296 struct drm_device *dev = obj->base.dev;
231 struct drm_gem_object *target_obj; 297 struct drm_gem_object *target_obj;
232 struct drm_i915_gem_object *target_i915_obj; 298 struct drm_i915_gem_object *target_i915_obj;
299 struct i915_vma *target_vma;
233 uint32_t target_offset; 300 uint32_t target_offset;
234 int ret = -EINVAL; 301 int ret = -EINVAL;
235 302
236 /* we've already hold a reference to all valid objects */ 303 /* we've already hold a reference to all valid objects */
237 target_obj = &eb_get_object(eb, reloc->target_handle)->base; 304 target_vma = eb_get_vma(eb, reloc->target_handle);
238 if (unlikely(target_obj == NULL)) 305 if (unlikely(target_vma == NULL))
239 return -ENOENT; 306 return -ENOENT;
307 target_i915_obj = target_vma->obj;
308 target_obj = &target_vma->obj->base;
240 309
241 target_i915_obj = to_intel_bo(target_obj);
242 target_offset = i915_gem_obj_ggtt_offset(target_i915_obj); 310 target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
243 311
244 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and 312 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
@@ -284,7 +352,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
284 return 0; 352 return 0;
285 353
286 /* Check that the relocation address is valid... */ 354 /* Check that the relocation address is valid... */
287 if (unlikely(reloc->offset > obj->base.size - 4)) { 355 if (unlikely(reloc->offset >
356 obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
288 DRM_DEBUG("Relocation beyond object bounds: " 357 DRM_DEBUG("Relocation beyond object bounds: "
289 "obj %p target %d offset %d size %d.\n", 358 "obj %p target %d offset %d size %d.\n",
290 obj, reloc->target_handle, 359 obj, reloc->target_handle,
@@ -320,14 +389,13 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
320} 389}
321 390
322static int 391static int
323i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, 392i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
324 struct eb_objects *eb, 393 struct eb_vmas *eb)
325 struct i915_address_space *vm)
326{ 394{
327#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) 395#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
328 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; 396 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
329 struct drm_i915_gem_relocation_entry __user *user_relocs; 397 struct drm_i915_gem_relocation_entry __user *user_relocs;
330 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 398 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
331 int remain, ret; 399 int remain, ret;
332 400
333 user_relocs = to_user_ptr(entry->relocs_ptr); 401 user_relocs = to_user_ptr(entry->relocs_ptr);
@@ -346,8 +414,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
346 do { 414 do {
347 u64 offset = r->presumed_offset; 415 u64 offset = r->presumed_offset;
348 416
349 ret = i915_gem_execbuffer_relocate_entry(obj, eb, r, 417 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
350 vm); 418 vma->vm);
351 if (ret) 419 if (ret)
352 return ret; 420 return ret;
353 421
@@ -368,17 +436,16 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
368} 436}
369 437
370static int 438static int
371i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, 439i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
372 struct eb_objects *eb, 440 struct eb_vmas *eb,
373 struct drm_i915_gem_relocation_entry *relocs, 441 struct drm_i915_gem_relocation_entry *relocs)
374 struct i915_address_space *vm)
375{ 442{
376 const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 443 const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
377 int i, ret; 444 int i, ret;
378 445
379 for (i = 0; i < entry->relocation_count; i++) { 446 for (i = 0; i < entry->relocation_count; i++) {
380 ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i], 447 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
381 vm); 448 vma->vm);
382 if (ret) 449 if (ret)
383 return ret; 450 return ret;
384 } 451 }
@@ -387,10 +454,10 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
387} 454}
388 455
389static int 456static int
390i915_gem_execbuffer_relocate(struct eb_objects *eb, 457i915_gem_execbuffer_relocate(struct eb_vmas *eb,
391 struct i915_address_space *vm) 458 struct i915_address_space *vm)
392{ 459{
393 struct drm_i915_gem_object *obj; 460 struct i915_vma *vma;
394 int ret = 0; 461 int ret = 0;
395 462
396 /* This is the fast path and we cannot handle a pagefault whilst 463 /* This is the fast path and we cannot handle a pagefault whilst
@@ -401,8 +468,8 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
401 * lockdep complains vehemently. 468 * lockdep complains vehemently.
402 */ 469 */
403 pagefault_disable(); 470 pagefault_disable();
404 list_for_each_entry(obj, &eb->objects, exec_list) { 471 list_for_each_entry(vma, &eb->vmas, exec_list) {
405 ret = i915_gem_execbuffer_relocate_object(obj, eb, vm); 472 ret = i915_gem_execbuffer_relocate_vma(vma, eb);
406 if (ret) 473 if (ret)
407 break; 474 break;
408 } 475 }
@@ -415,31 +482,32 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
415#define __EXEC_OBJECT_HAS_FENCE (1<<30) 482#define __EXEC_OBJECT_HAS_FENCE (1<<30)
416 483
417static int 484static int
418need_reloc_mappable(struct drm_i915_gem_object *obj) 485need_reloc_mappable(struct i915_vma *vma)
419{ 486{
420 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 487 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
421 return entry->relocation_count && !use_cpu_reloc(obj); 488 return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
489 i915_is_ggtt(vma->vm);
422} 490}
423 491
424static int 492static int
425i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, 493i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
426 struct intel_ring_buffer *ring, 494 struct intel_ring_buffer *ring,
427 struct i915_address_space *vm, 495 bool *need_reloc)
428 bool *need_reloc)
429{ 496{
430 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 497 struct drm_i915_private *dev_priv = ring->dev->dev_private;
431 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 498 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
432 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; 499 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
433 bool need_fence, need_mappable; 500 bool need_fence, need_mappable;
501 struct drm_i915_gem_object *obj = vma->obj;
434 int ret; 502 int ret;
435 503
436 need_fence = 504 need_fence =
437 has_fenced_gpu_access && 505 has_fenced_gpu_access &&
438 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 506 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
439 obj->tiling_mode != I915_TILING_NONE; 507 obj->tiling_mode != I915_TILING_NONE;
440 need_mappable = need_fence || need_reloc_mappable(obj); 508 need_mappable = need_fence || need_reloc_mappable(vma);
441 509
442 ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable, 510 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
443 false); 511 false);
444 if (ret) 512 if (ret)
445 return ret; 513 return ret;
@@ -467,8 +535,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
467 obj->has_aliasing_ppgtt_mapping = 1; 535 obj->has_aliasing_ppgtt_mapping = 1;
468 } 536 }
469 537
470 if (entry->offset != i915_gem_obj_offset(obj, vm)) { 538 if (entry->offset != vma->node.start) {
471 entry->offset = i915_gem_obj_offset(obj, vm); 539 entry->offset = vma->node.start;
472 *need_reloc = true; 540 *need_reloc = true;
473 } 541 }
474 542
@@ -485,14 +553,15 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
485} 553}
486 554
487static void 555static void
488i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) 556i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
489{ 557{
490 struct drm_i915_gem_exec_object2 *entry; 558 struct drm_i915_gem_exec_object2 *entry;
559 struct drm_i915_gem_object *obj = vma->obj;
491 560
492 if (!i915_gem_obj_bound_any(obj)) 561 if (!drm_mm_node_allocated(&vma->node))
493 return; 562 return;
494 563
495 entry = obj->exec_entry; 564 entry = vma->exec_entry;
496 565
497 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) 566 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
498 i915_gem_object_unpin_fence(obj); 567 i915_gem_object_unpin_fence(obj);
@@ -505,41 +574,46 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
505 574
506static int 575static int
507i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 576i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
508 struct list_head *objects, 577 struct list_head *vmas,
509 struct i915_address_space *vm,
510 bool *need_relocs) 578 bool *need_relocs)
511{ 579{
512 struct drm_i915_gem_object *obj; 580 struct drm_i915_gem_object *obj;
513 struct list_head ordered_objects; 581 struct i915_vma *vma;
582 struct i915_address_space *vm;
583 struct list_head ordered_vmas;
514 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; 584 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
515 int retry; 585 int retry;
516 586
517 INIT_LIST_HEAD(&ordered_objects); 587 if (list_empty(vmas))
518 while (!list_empty(objects)) { 588 return 0;
589
590 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
591
592 INIT_LIST_HEAD(&ordered_vmas);
593 while (!list_empty(vmas)) {
519 struct drm_i915_gem_exec_object2 *entry; 594 struct drm_i915_gem_exec_object2 *entry;
520 bool need_fence, need_mappable; 595 bool need_fence, need_mappable;
521 596
522 obj = list_first_entry(objects, 597 vma = list_first_entry(vmas, struct i915_vma, exec_list);
523 struct drm_i915_gem_object, 598 obj = vma->obj;
524 exec_list); 599 entry = vma->exec_entry;
525 entry = obj->exec_entry;
526 600
527 need_fence = 601 need_fence =
528 has_fenced_gpu_access && 602 has_fenced_gpu_access &&
529 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 603 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
530 obj->tiling_mode != I915_TILING_NONE; 604 obj->tiling_mode != I915_TILING_NONE;
531 need_mappable = need_fence || need_reloc_mappable(obj); 605 need_mappable = need_fence || need_reloc_mappable(vma);
532 606
533 if (need_mappable) 607 if (need_mappable)
534 list_move(&obj->exec_list, &ordered_objects); 608 list_move(&vma->exec_list, &ordered_vmas);
535 else 609 else
536 list_move_tail(&obj->exec_list, &ordered_objects); 610 list_move_tail(&vma->exec_list, &ordered_vmas);
537 611
538 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND; 612 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
539 obj->base.pending_write_domain = 0; 613 obj->base.pending_write_domain = 0;
540 obj->pending_fenced_gpu_access = false; 614 obj->pending_fenced_gpu_access = false;
541 } 615 }
542 list_splice(&ordered_objects, objects); 616 list_splice(&ordered_vmas, vmas);
543 617
544 /* Attempt to pin all of the buffers into the GTT. 618 /* Attempt to pin all of the buffers into the GTT.
545 * This is done in 3 phases: 619 * This is done in 3 phases:
@@ -558,52 +632,52 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
558 int ret = 0; 632 int ret = 0;
559 633
560 /* Unbind any ill-fitting objects or pin. */ 634 /* Unbind any ill-fitting objects or pin. */
561 list_for_each_entry(obj, objects, exec_list) { 635 list_for_each_entry(vma, vmas, exec_list) {
562 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 636 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
563 bool need_fence, need_mappable; 637 bool need_fence, need_mappable;
564 u32 obj_offset;
565 638
566 if (!i915_gem_obj_bound(obj, vm)) 639 obj = vma->obj;
640
641 if (!drm_mm_node_allocated(&vma->node))
567 continue; 642 continue;
568 643
569 obj_offset = i915_gem_obj_offset(obj, vm);
570 need_fence = 644 need_fence =
571 has_fenced_gpu_access && 645 has_fenced_gpu_access &&
572 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 646 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
573 obj->tiling_mode != I915_TILING_NONE; 647 obj->tiling_mode != I915_TILING_NONE;
574 need_mappable = need_fence || need_reloc_mappable(obj); 648 need_mappable = need_fence || need_reloc_mappable(vma);
575 649
576 WARN_ON((need_mappable || need_fence) && 650 WARN_ON((need_mappable || need_fence) &&
577 !i915_is_ggtt(vm)); 651 !i915_is_ggtt(vma->vm));
578 652
579 if ((entry->alignment && 653 if ((entry->alignment &&
580 obj_offset & (entry->alignment - 1)) || 654 vma->node.start & (entry->alignment - 1)) ||
581 (need_mappable && !obj->map_and_fenceable)) 655 (need_mappable && !obj->map_and_fenceable))
582 ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm)); 656 ret = i915_vma_unbind(vma);
583 else 657 else
584 ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs); 658 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
585 if (ret) 659 if (ret)
586 goto err; 660 goto err;
587 } 661 }
588 662
589 /* Bind fresh objects */ 663 /* Bind fresh objects */
590 list_for_each_entry(obj, objects, exec_list) { 664 list_for_each_entry(vma, vmas, exec_list) {
591 if (i915_gem_obj_bound(obj, vm)) 665 if (drm_mm_node_allocated(&vma->node))
592 continue; 666 continue;
593 667
594 ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs); 668 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
595 if (ret) 669 if (ret)
596 goto err; 670 goto err;
597 } 671 }
598 672
599err: /* Decrement pin count for bound objects */ 673err: /* Decrement pin count for bound objects */
600 list_for_each_entry(obj, objects, exec_list) 674 list_for_each_entry(vma, vmas, exec_list)
601 i915_gem_execbuffer_unreserve_object(obj); 675 i915_gem_execbuffer_unreserve_vma(vma);
602 676
603 if (ret != -ENOSPC || retry++) 677 if (ret != -ENOSPC || retry++)
604 return ret; 678 return ret;
605 679
606 ret = i915_gem_evict_everything(ring->dev); 680 ret = i915_gem_evict_vm(vm, true);
607 if (ret) 681 if (ret)
608 return ret; 682 return ret;
609 } while (1); 683 } while (1);
@@ -614,24 +688,27 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
614 struct drm_i915_gem_execbuffer2 *args, 688 struct drm_i915_gem_execbuffer2 *args,
615 struct drm_file *file, 689 struct drm_file *file,
616 struct intel_ring_buffer *ring, 690 struct intel_ring_buffer *ring,
617 struct eb_objects *eb, 691 struct eb_vmas *eb,
618 struct drm_i915_gem_exec_object2 *exec, 692 struct drm_i915_gem_exec_object2 *exec)
619 struct i915_address_space *vm)
620{ 693{
621 struct drm_i915_gem_relocation_entry *reloc; 694 struct drm_i915_gem_relocation_entry *reloc;
622 struct drm_i915_gem_object *obj; 695 struct i915_address_space *vm;
696 struct i915_vma *vma;
623 bool need_relocs; 697 bool need_relocs;
624 int *reloc_offset; 698 int *reloc_offset;
625 int i, total, ret; 699 int i, total, ret;
626 int count = args->buffer_count; 700 unsigned count = args->buffer_count;
701
702 if (WARN_ON(list_empty(&eb->vmas)))
703 return 0;
704
705 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
627 706
628 /* We may process another execbuffer during the unlock... */ 707 /* We may process another execbuffer during the unlock... */
629 while (!list_empty(&eb->objects)) { 708 while (!list_empty(&eb->vmas)) {
630 obj = list_first_entry(&eb->objects, 709 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
631 struct drm_i915_gem_object, 710 list_del_init(&vma->exec_list);
632 exec_list); 711 drm_gem_object_unreference(&vma->obj->base);
633 list_del_init(&obj->exec_list);
634 drm_gem_object_unreference(&obj->base);
635 } 712 }
636 713
637 mutex_unlock(&dev->struct_mutex); 714 mutex_unlock(&dev->struct_mutex);
@@ -695,20 +772,19 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
695 772
696 /* reacquire the objects */ 773 /* reacquire the objects */
697 eb_reset(eb); 774 eb_reset(eb);
698 ret = eb_lookup_objects(eb, exec, args, file); 775 ret = eb_lookup_vmas(eb, exec, args, vm, file);
699 if (ret) 776 if (ret)
700 goto err; 777 goto err;
701 778
702 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 779 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
703 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs); 780 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
704 if (ret) 781 if (ret)
705 goto err; 782 goto err;
706 783
707 list_for_each_entry(obj, &eb->objects, exec_list) { 784 list_for_each_entry(vma, &eb->vmas, exec_list) {
708 int offset = obj->exec_entry - exec; 785 int offset = vma->exec_entry - exec;
709 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, 786 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
710 reloc + reloc_offset[offset], 787 reloc + reloc_offset[offset]);
711 vm);
712 if (ret) 788 if (ret)
713 goto err; 789 goto err;
714 } 790 }
@@ -727,14 +803,15 @@ err:
727 803
728static int 804static int
729i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, 805i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
730 struct list_head *objects) 806 struct list_head *vmas)
731{ 807{
732 struct drm_i915_gem_object *obj; 808 struct i915_vma *vma;
733 uint32_t flush_domains = 0; 809 uint32_t flush_domains = 0;
734 bool flush_chipset = false; 810 bool flush_chipset = false;
735 int ret; 811 int ret;
736 812
737 list_for_each_entry(obj, objects, exec_list) { 813 list_for_each_entry(vma, vmas, exec_list) {
814 struct drm_i915_gem_object *obj = vma->obj;
738 ret = i915_gem_object_sync(obj, ring); 815 ret = i915_gem_object_sync(obj, ring);
739 if (ret) 816 if (ret)
740 return ret; 817 return ret;
@@ -771,8 +848,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
771 int count) 848 int count)
772{ 849{
773 int i; 850 int i;
774 int relocs_total = 0; 851 unsigned relocs_total = 0;
775 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); 852 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
776 853
777 for (i = 0; i < count; i++) { 854 for (i = 0; i < count; i++) {
778 char __user *ptr = to_user_ptr(exec[i].relocs_ptr); 855 char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
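
Switching the relocation totals from int to unsigned pairs with a pre-add bound of the form "count > relocs_max - relocs_total", so the running sum can never wrap even for hostile userspace values. A standalone demonstration of the guard, with an invented entry size:

#include <stdint.h>
#include <stdio.h>

#define RELOC_SIZE 32u	/* stand-in for sizeof(...relocation_entry) */

int main(void)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT32_MAX / RELOC_SIZE;
	unsigned counts[] = { 1000, 134217000u, 7 };
	unsigned i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
		/* Reject before adding, so relocs_total cannot overflow. */
		if (counts[i] > relocs_max - relocs_total) {
			printf("buffer %u: -EINVAL (would overflow)\n", i);
			return 1;
		}
		relocs_total += counts[i];
	}
	printf("total relocations: %u\n", relocs_total);
	return 0;
}
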
@@ -809,13 +886,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
809} 886}
810 887
811static void 888static void
812i915_gem_execbuffer_move_to_active(struct list_head *objects, 889i915_gem_execbuffer_move_to_active(struct list_head *vmas,
813 struct i915_address_space *vm,
814 struct intel_ring_buffer *ring) 890 struct intel_ring_buffer *ring)
815{ 891{
816 struct drm_i915_gem_object *obj; 892 struct i915_vma *vma;
817 893
818 list_for_each_entry(obj, objects, exec_list) { 894 list_for_each_entry(vma, vmas, exec_list) {
895 struct drm_i915_gem_object *obj = vma->obj;
819 u32 old_read = obj->base.read_domains; 896 u32 old_read = obj->base.read_domains;
820 u32 old_write = obj->base.write_domain; 897 u32 old_write = obj->base.write_domain;
821 898
@@ -825,9 +902,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
825 obj->base.read_domains = obj->base.pending_read_domains; 902 obj->base.read_domains = obj->base.pending_read_domains;
826 obj->fenced_gpu_access = obj->pending_fenced_gpu_access; 903 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
827 904
828 /* FIXME: This lookup gets fixed later <-- danvet */ 905 i915_vma_move_to_active(vma, ring);
829 list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
830 i915_gem_object_move_to_active(obj, ring);
831 if (obj->base.write_domain) { 906 if (obj->base.write_domain) {
832 obj->dirty = 1; 907 obj->dirty = 1;
833 obj->last_write_seqno = intel_ring_get_seqno(ring); 908 obj->last_write_seqno = intel_ring_get_seqno(ring);
@@ -885,10 +960,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
885 struct i915_address_space *vm) 960 struct i915_address_space *vm)
886{ 961{
887 drm_i915_private_t *dev_priv = dev->dev_private; 962 drm_i915_private_t *dev_priv = dev->dev_private;
888 struct eb_objects *eb; 963 struct eb_vmas *eb;
889 struct drm_i915_gem_object *batch_obj; 964 struct drm_i915_gem_object *batch_obj;
890 struct drm_clip_rect *cliprects = NULL; 965 struct drm_clip_rect *cliprects = NULL;
891 struct intel_ring_buffer *ring; 966 struct intel_ring_buffer *ring;
967 struct i915_ctx_hang_stats *hs;
892 u32 ctx_id = i915_execbuffer2_get_context_id(*args); 968 u32 ctx_id = i915_execbuffer2_get_context_id(*args);
893 u32 exec_start, exec_len; 969 u32 exec_start, exec_len;
894 u32 mask, flags; 970 u32 mask, flags;
@@ -1000,7 +1076,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1000 return -EINVAL; 1076 return -EINVAL;
1001 } 1077 }
1002 1078
1003 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), 1079 cliprects = kcalloc(args->num_cliprects,
1080 sizeof(*cliprects),
1004 GFP_KERNEL); 1081 GFP_KERNEL);
1005 if (cliprects == NULL) { 1082 if (cliprects == NULL) {
1006 ret = -ENOMEM; 1083 ret = -ENOMEM;
@@ -1025,7 +1102,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1025 goto pre_mutex_err; 1102 goto pre_mutex_err;
1026 } 1103 }
1027 1104
1028 eb = eb_create(args); 1105 eb = eb_create(args, vm);
1029 if (eb == NULL) { 1106 if (eb == NULL) {
1030 mutex_unlock(&dev->struct_mutex); 1107 mutex_unlock(&dev->struct_mutex);
1031 ret = -ENOMEM; 1108 ret = -ENOMEM;
@@ -1033,18 +1110,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1033 } 1110 }
1034 1111
1035 /* Look up object handles */ 1112 /* Look up object handles */
1036 ret = eb_lookup_objects(eb, exec, args, file); 1113 ret = eb_lookup_vmas(eb, exec, args, vm, file);
1037 if (ret) 1114 if (ret)
1038 goto err; 1115 goto err;
1039 1116
1040 /* take note of the batch buffer before we might reorder the lists */ 1117 /* take note of the batch buffer before we might reorder the lists */
1041 batch_obj = list_entry(eb->objects.prev, 1118 batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
1042 struct drm_i915_gem_object,
1043 exec_list);
1044 1119
1045 /* Move the objects en-masse into the GTT, evicting if necessary. */ 1120 /* Move the objects en-masse into the GTT, evicting if necessary. */
1046 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 1121 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1047 ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs); 1122 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
1048 if (ret) 1123 if (ret)
1049 goto err; 1124 goto err;
1050 1125
@@ -1054,7 +1129,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1054 if (ret) { 1129 if (ret) {
1055 if (ret == -EFAULT) { 1130 if (ret == -EFAULT) {
1056 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, 1131 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1057 eb, exec, vm); 1132 eb, exec);
1058 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 1133 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1059 } 1134 }
1060 if (ret) 1135 if (ret)
@@ -1071,15 +1146,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1071 1146
1072 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure 1147 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1073 * batch" bit. Hence we need to pin secure batches into the global gtt. 1148 * batch" bit. Hence we need to pin secure batches into the global gtt.
1074 * hsw should have this fixed, but let's be paranoid and do it 1149 * hsw should have this fixed, but bdw mucks it up again. */
1075 * unconditionally for now. */
1076 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) 1150 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
1077 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); 1151 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
1078 1152
1079 ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects); 1153 ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
1080 if (ret) 1154 if (ret)
1081 goto err; 1155 goto err;
1082 1156
1157 hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
1158 if (IS_ERR(hs)) {
1159 ret = PTR_ERR(hs);
1160 goto err;
1161 }
1162
1163 if (hs->banned) {
1164 ret = -EIO;
1165 goto err;
1166 }
1167
1083 ret = i915_switch_context(ring, file, ctx_id); 1168 ret = i915_switch_context(ring, file, ctx_id);
1084 if (ret) 1169 if (ret)
1085 goto err; 1170 goto err;
@@ -1131,7 +1216,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1131 1216
1132 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); 1217 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1133 1218
1134 i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring); 1219 i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
1135 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); 1220 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1136 1221
1137err: 1222err:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1f7b4caefb6e..3620a1b0a73c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,6 +30,8 @@
30 30
31#define GEN6_PPGTT_PD_ENTRIES 512 31#define GEN6_PPGTT_PD_ENTRIES 512
32#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t)) 32#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
33typedef uint64_t gen8_gtt_pte_t;
34typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
33 35
34/* PPGTT stuff */ 36/* PPGTT stuff */
35#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) 37#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
@@ -57,6 +59,41 @@
57#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) 59#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
58#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) 60#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
59 61
62#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
63#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
64#define GEN8_LEGACY_PDPS 4
65
66#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
67#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
68#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
69#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
70
71static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
72 enum i915_cache_level level,
73 bool valid)
74{
75 gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
76 pte |= addr;
77 if (level != I915_CACHE_NONE)
78 pte |= PPAT_CACHED_INDEX;
79 else
80 pte |= PPAT_UNCACHED_INDEX;
81 return pte;
82}
83
84static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
85 dma_addr_t addr,
86 enum i915_cache_level level)
87{
88 gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
89 pde |= addr;
90 if (level != I915_CACHE_NONE)
91 pde |= PPAT_CACHED_PDE_INDEX;
92 else
93 pde |= PPAT_UNCACHED_INDEX;
94 return pde;
95}
96
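
gen8_pte_encode() above builds a 64-bit PTE by OR-ing the page address with x86-style flag bits: present and writable when valid, then the PPAT index bits selecting cached (PAT, bit 7) or uncached (PWT|PCD, bits 3 and 4) behaviour. A userspace model of the encoding; the bit positions follow the x86 page-table flags the code reuses:

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT	(1ull << 0)
#define _PAGE_RW	(1ull << 1)
#define _PAGE_PWT	(1ull << 3)
#define _PAGE_PCD	(1ull << 4)
#define _PAGE_PAT	(1ull << 7)

#define PPAT_UNCACHED	(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED	_PAGE_PAT

static uint64_t pte_encode(uint64_t addr, int cached, int valid)
{
	uint64_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;

	pte |= addr;			/* page-aligned physical address */
	pte |= cached ? PPAT_CACHED : PPAT_UNCACHED;
	return pte;
}

int main(void)
{
	printf("cached   pte: %#llx\n",
	       (unsigned long long)pte_encode(0x1000, 1, 1));
	printf("uncached pte: %#llx\n",
	       (unsigned long long)pte_encode(0x1000, 0, 1));
	return 0;
}
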
60static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, 97static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
61 enum i915_cache_level level, 98 enum i915_cache_level level,
62 bool valid) 99 bool valid)
@@ -158,6 +195,257 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
158 return pte; 195 return pte;
159} 196}
160 197
198/* Broadwell Page Directory Pointer Descriptors */
199static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
200 uint64_t val)
201{
202 int ret;
203
204 BUG_ON(entry >= 4);
205
206 ret = intel_ring_begin(ring, 6);
207 if (ret)
208 return ret;
209
210 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
211 intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
212 intel_ring_emit(ring, (u32)(val >> 32));
213 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
214 intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
215 intel_ring_emit(ring, (u32)(val));
216 intel_ring_advance(ring);
217
218 return 0;
219}
220
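
gen8_write_pdp() programs each 64-bit page-directory pointer through two 32-bit MI_LOAD_REGISTER_IMM writes, upper dword before lower. A trivial model of the split, with emit() standing in for the ring emission:

#include <stdint.h>
#include <stdio.h>

static void emit(const char *reg, uint32_t val)
{
	printf("LRI %s <- %#010x\n", reg, val);
}

int main(void)
{
	uint64_t pd_addr = 0x123456789000ull;	/* example PD page address */

	emit("PDP0_UDW", (uint32_t)(pd_addr >> 32));
	emit("PDP0_LDW", (uint32_t)pd_addr);
	return 0;
}
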
221static int gen8_ppgtt_enable(struct drm_device *dev)
222{
223 struct drm_i915_private *dev_priv = dev->dev_private;
224 struct intel_ring_buffer *ring;
225 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
226 int i, j, ret;
227
228 /* bit of a hack to find the actual last used pd */
229 int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
230
231 for_each_ring(ring, dev_priv, j) {
232 I915_WRITE(RING_MODE_GEN7(ring),
233 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
234 }
235
236 for (i = used_pd - 1; i >= 0; i--) {
237 dma_addr_t addr = ppgtt->pd_dma_addr[i];
238 for_each_ring(ring, dev_priv, j) {
239 ret = gen8_write_pdp(ring, i, addr);
240 if (ret)
241 return ret;
242 }
243 }
244 return 0;
245}
246
247static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
248 unsigned first_entry,
249 unsigned num_entries,
250 bool use_scratch)
251{
252 struct i915_hw_ppgtt *ppgtt =
253 container_of(vm, struct i915_hw_ppgtt, base);
254 gen8_gtt_pte_t *pt_vaddr, scratch_pte;
255 unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
256 unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
257 unsigned last_pte, i;
258
259 scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
260 I915_CACHE_LLC, use_scratch);
261
262 while (num_entries) {
263 struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
264
265 last_pte = first_pte + num_entries;
266 if (last_pte > GEN8_PTES_PER_PAGE)
267 last_pte = GEN8_PTES_PER_PAGE;
268
269 pt_vaddr = kmap_atomic(page_table);
270
271 for (i = first_pte; i < last_pte; i++)
272 pt_vaddr[i] = scratch_pte;
273
274 kunmap_atomic(pt_vaddr);
275
276 num_entries -= last_pte - first_pte;
277 first_pte = 0;
278 act_pt++;
279 }
280}
281
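
The clear loop walks page tables one at a time: first_entry splits into a table index and an offset within that table, and last_pte is clamped so a single kmap covers at most one 4K table. A standalone sketch of just that index arithmetic (GEN8_PTES_PER_PAGE is 4096/8 = 512):

#include <stdio.h>

#define PTES_PER_PAGE 512 /* PAGE_SIZE / sizeof(gen8_gtt_pte_t) */

int main(void)
{
	unsigned first_entry = 700, num_entries = 900;
	unsigned pt = first_entry / PTES_PER_PAGE;   /* start in table 1 */
	unsigned pte = first_entry % PTES_PER_PAGE;  /* at slot 188 */

	while (num_entries) {
		unsigned last = pte + num_entries;
		if (last > PTES_PER_PAGE)
			last = PTES_PER_PAGE;        /* clamp to table end */
		printf("table %u: slots %u..%u\n", pt, pte, last - 1);
		num_entries -= last - pte;
		pte = 0;
		pt++;
	}
	return 0;
}
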
282static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
283 struct sg_table *pages,
284 unsigned first_entry,
285 enum i915_cache_level cache_level)
286{
287 struct i915_hw_ppgtt *ppgtt =
288 container_of(vm, struct i915_hw_ppgtt, base);
289 gen8_gtt_pte_t *pt_vaddr;
290 unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
291 unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
292 struct sg_page_iter sg_iter;
293
294 pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
295 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
296 dma_addr_t page_addr;
297
298 page_addr = sg_dma_address(sg_iter.sg) +
299 (sg_iter.sg_pgoffset << PAGE_SHIFT);
300 pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level,
301 true);
302 if (++act_pte == GEN8_PTES_PER_PAGE) {
303 kunmap_atomic(pt_vaddr);
304 act_pt++;
305 pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
306 act_pte = 0;
307
308 }
309 }
310 kunmap_atomic(pt_vaddr);
311}
312
313static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
314{
315 struct i915_hw_ppgtt *ppgtt =
316 container_of(vm, struct i915_hw_ppgtt, base);
317 int i, j;
318
319 for (i = 0; i < ppgtt->num_pd_pages ; i++) {
320 if (ppgtt->pd_dma_addr[i]) {
321 pci_unmap_page(ppgtt->base.dev->pdev,
322 ppgtt->pd_dma_addr[i],
323 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
324
325 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
326 dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
327 if (addr)
328 pci_unmap_page(ppgtt->base.dev->pdev,
329 addr,
330 PAGE_SIZE,
331 PCI_DMA_BIDIRECTIONAL);
332
333 }
334 }
335 kfree(ppgtt->gen8_pt_dma_addr[i]);
336 }
337
338 	__free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
339 	__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
340}
341
342/**
343 * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
344 * net effect resembling a 2-level page table in normal x86 terms. Each PDP
345 * represents 1GB of memory
346 * 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
347 *
348 * TODO: Do something with the size parameter
349 **/
350static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
351{
352 struct page *pt_pages;
353 int i, j, ret = -ENOMEM;
354 const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
355 const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
356
357 if (size % (1<<30))
358 DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
359
360 /* FIXME: split allocation into smaller pieces. For now we only ever do
361 * this once, but with full PPGTT, the multiple contiguous allocations
362 * will be bad.
363 */
364 ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
365 if (!ppgtt->pd_pages)
366 return -ENOMEM;
367
368 pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
369 if (!pt_pages) {
370 __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
371 return -ENOMEM;
372 }
373
374 ppgtt->gen8_pt_pages = pt_pages;
375 ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
376 ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
377 ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
378 ppgtt->enable = gen8_ppgtt_enable;
379 ppgtt->base.clear_range = gen8_ppgtt_clear_range;
380 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
381 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
382
383 BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
384
385 /*
386 * - Create a mapping for the page directories.
387 * - For each page directory:
388 * allocate space for page table mappings.
389 * map each page table
390 */
391 for (i = 0; i < max_pdp; i++) {
392 dma_addr_t temp;
393 temp = pci_map_page(ppgtt->base.dev->pdev,
394 &ppgtt->pd_pages[i], 0,
395 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
396 if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
397 goto err_out;
398
399 ppgtt->pd_dma_addr[i] = temp;
400
401 ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
402 if (!ppgtt->gen8_pt_dma_addr[i])
403 goto err_out;
404
405 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
406 struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
407 temp = pci_map_page(ppgtt->base.dev->pdev,
408 p, 0, PAGE_SIZE,
409 PCI_DMA_BIDIRECTIONAL);
410
411 if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
412 goto err_out;
413
414 ppgtt->gen8_pt_dma_addr[i][j] = temp;
415 }
416 }
417
418 	/* For now, the PPGTT helper functions all require that the PDEs are
419 	 * plugged in correctly. So we do that here. For aliasing PPGTT, we
420 	 * will never need to touch the PDEs again. */
421 for (i = 0; i < max_pdp; i++) {
422 gen8_ppgtt_pde_t *pd_vaddr;
423 pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
424 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
425 dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
426 pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
427 I915_CACHE_LLC);
428 }
429 kunmap_atomic(pd_vaddr);
430 }
431
432 ppgtt->base.clear_range(&ppgtt->base, 0,
433 ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
434 true);
435
436 DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
437 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
438 DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
439 ppgtt->num_pt_pages,
440 (ppgtt->num_pt_pages - num_pt_pages) +
441 size % (1<<30));
442 return 0;
443
444err_out:
445 ppgtt->base.cleanup(&ppgtt->base);
446 return ret;
447}
448
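
The sizing comment above works out as follows: each PDP covers 512 PDEs, each PDE points at a page table of 512 PTEs, and each PTE maps a 4KiB page, so one PDP spans 512 * 512 * 4096 bytes = 1GiB, and the four legacy PDPs together give the 4GiB address space. A one-line check:

#include <stdio.h>

int main(void)
{
	unsigned long long span = 4ULL * 512 * 512 * 4096;

	printf("%llu GiB\n", span >> 30); /* prints 4 */
	return 0;
}
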
161static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) 449static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
162{ 450{
163 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; 451 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
@@ -342,7 +630,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
342 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; 630 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
343 ppgtt->base.cleanup = gen6_ppgtt_cleanup; 631 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
344 ppgtt->base.scratch = dev_priv->gtt.base.scratch; 632 ppgtt->base.scratch = dev_priv->gtt.base.scratch;
345 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, 633 ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
346 GFP_KERNEL); 634 GFP_KERNEL);
347 if (!ppgtt->pt_pages) 635 if (!ppgtt->pt_pages)
348 return -ENOMEM; 636 return -ENOMEM;
@@ -353,7 +641,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
353 goto err_pt_alloc; 641 goto err_pt_alloc;
354 } 642 }
355 643
356 ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries, 644 ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
357 GFP_KERNEL); 645 GFP_KERNEL);
358 if (!ppgtt->pt_dma_addr) 646 if (!ppgtt->pt_dma_addr)
359 goto err_pt_alloc; 647 goto err_pt_alloc;
@@ -410,6 +698,8 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
410 698
411 if (INTEL_INFO(dev)->gen < 8) 699 if (INTEL_INFO(dev)->gen < 8)
412 ret = gen6_ppgtt_init(ppgtt); 700 ret = gen6_ppgtt_init(ppgtt);
701 else if (IS_GEN8(dev))
702 ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
413 else 703 else
414 BUG(); 704 BUG();
415 705
@@ -573,6 +863,57 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
573 return 0; 863 return 0;
574} 864}
575 865
866static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
867{
868#ifdef writeq
869 writeq(pte, addr);
870#else
871 iowrite32((u32)pte, addr);
872 iowrite32(pte >> 32, addr + 4);
873#endif
874}
875
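
On 32-bit kernels without writeq, the 64-bit PTE is written as two 32-bit iowrites. The two halves are not atomic, which is tolerable here because the GPU should not be walking entries while they are rewritten. A userspace analogue of the split, with plain stores standing in for iowrite32 and little-endian layout assumed:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void set_pte64(void *addr, uint64_t pte)
{
	uint32_t lo = (uint32_t)pte;         /* low dword first */
	uint32_t hi = (uint32_t)(pte >> 32); /* then the high dword */

	memcpy(addr, &lo, 4);
	memcpy((char *)addr + 4, &hi, 4);
}

int main(void)
{
	uint64_t slot = 0;

	set_pte64(&slot, 0x123456789abcdef0ULL);
	printf("0x%llx\n", (unsigned long long)slot);
	return 0;
}
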
876static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
877 struct sg_table *st,
878 unsigned int first_entry,
879 enum i915_cache_level level)
880{
881 struct drm_i915_private *dev_priv = vm->dev->dev_private;
882 gen8_gtt_pte_t __iomem *gtt_entries =
883 (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
884 int i = 0;
885 struct sg_page_iter sg_iter;
886 dma_addr_t addr;
887
888 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
889 addr = sg_dma_address(sg_iter.sg) +
890 (sg_iter.sg_pgoffset << PAGE_SHIFT);
891 gen8_set_pte(&gtt_entries[i],
892 gen8_pte_encode(addr, level, true));
893 i++;
894 }
895
896 /*
897 * XXX: This serves as a posting read to make sure that the PTE has
898 * actually been updated. There is some concern that even though
899 	 * registers and PTEs are within the same BAR, they may see different
900 	 * (NUMA-like) access patterns. Therefore, even with the way we assume
901 	 * the hardware should work, we keep this posting read for paranoia.
902 */
903 if (i != 0)
904 WARN_ON(readq(&gtt_entries[i-1])
905 != gen8_pte_encode(addr, level, true));
906
907#if 0 /* TODO: Still needed on GEN8? */
908 /* This next bit makes the above posting read even more important. We
909 * want to flush the TLBs only after we're certain all the PTE updates
910 * have finished.
911 */
912 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
913 POSTING_READ(GFX_FLSH_CNTL_GEN6);
914#endif
915}
916
576/* 917/*
577 * Binds an object into the global gtt with the specified cache level. The object 918 * Binds an object into the global gtt with the specified cache level. The object
578 * will be accessible to the GPU via commands whose operands reference offsets 919 * will be accessible to the GPU via commands whose operands reference offsets
@@ -615,6 +956,30 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
615 POSTING_READ(GFX_FLSH_CNTL_GEN6); 956 POSTING_READ(GFX_FLSH_CNTL_GEN6);
616} 957}
617 958
959static void gen8_ggtt_clear_range(struct i915_address_space *vm,
960 unsigned int first_entry,
961 unsigned int num_entries,
962 bool use_scratch)
963{
964 struct drm_i915_private *dev_priv = vm->dev->dev_private;
965 gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
966 (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
967 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
968 int i;
969
970 if (WARN(num_entries > max_entries,
971 "First entry = %d; Num entries = %d (max=%d)\n",
972 first_entry, num_entries, max_entries))
973 num_entries = max_entries;
974
975 scratch_pte = gen8_pte_encode(vm->scratch.addr,
976 I915_CACHE_LLC,
977 use_scratch);
978 for (i = 0; i < num_entries; i++)
979 gen8_set_pte(&gtt_base[i], scratch_pte);
980 readl(gtt_base);
981}
982
618static void gen6_ggtt_clear_range(struct i915_address_space *vm, 983static void gen6_ggtt_clear_range(struct i915_address_space *vm,
619 unsigned int first_entry, 984 unsigned int first_entry,
620 unsigned int num_entries, 985 unsigned int num_entries,
@@ -638,7 +1003,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
638 readl(gtt_base); 1003 readl(gtt_base);
639} 1004}
640 1005
641
642static void i915_ggtt_insert_entries(struct i915_address_space *vm, 1006static void i915_ggtt_insert_entries(struct i915_address_space *vm,
643 struct sg_table *st, 1007 struct sg_table *st,
644 unsigned int pg_start, 1008 unsigned int pg_start,
@@ -720,6 +1084,7 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
720 *end -= 4096; 1084 *end -= 4096;
721 } 1085 }
722} 1086}
1087
723void i915_gem_setup_global_gtt(struct drm_device *dev, 1088void i915_gem_setup_global_gtt(struct drm_device *dev,
724 unsigned long start, 1089 unsigned long start,
725 unsigned long mappable_end, 1090 unsigned long mappable_end,
@@ -817,7 +1182,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
817 1182
818 DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); 1183 DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
819 drm_mm_takedown(&dev_priv->gtt.base.mm); 1184 drm_mm_takedown(&dev_priv->gtt.base.mm);
820 gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; 1185 if (INTEL_INFO(dev)->gen < 8)
1186 		gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
821 } 1187 }
822 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 1188 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
823} 1189}
@@ -867,6 +1233,15 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
867 return snb_gmch_ctl << 20; 1233 return snb_gmch_ctl << 20;
868} 1234}
869 1235
1236static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
1237{
1238 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
1239 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
1240 if (bdw_gmch_ctl)
1241 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
1242 return bdw_gmch_ctl << 20;
1243}
1244
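
The GGMS field is a power-of-two exponent: a raw value of n selects a (1 << n) MiB GGTT, and since each GEN8 PTE is 8 bytes, an 8MiB GTT holds 1M entries and maps 4GiB. A quick decode of that arithmetic, mirroring how gen8_gmch_probe derives gtt_total below:

#include <stdio.h>

int main(void)
{
	unsigned ggms = 3;  /* raw field value from the GMCH control word */
	unsigned long long gtt_bytes = ggms ? (1ULL << ggms) << 20 : 0;
	unsigned long long entries = gtt_bytes / 8;  /* 8-byte gen8 PTEs */
	unsigned long long mapped = entries << 12;   /* 4KiB per entry */

	printf("GTT %llu MiB -> maps %llu GiB\n",
	       gtt_bytes >> 20, mapped >> 30);      /* GTT 8 MiB -> maps 4 GiB */
	return 0;
}
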
870static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) 1245static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
871{ 1246{
872 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; 1247 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
@@ -874,6 +1249,108 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
874 return snb_gmch_ctl << 25; /* 32 MB units */ 1249 return snb_gmch_ctl << 25; /* 32 MB units */
875} 1250}
876 1251
1252static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
1253{
1254 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
1255 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
1256 return bdw_gmch_ctl << 25; /* 32 MB units */
1257}
1258
1259static int ggtt_probe_common(struct drm_device *dev,
1260 size_t gtt_size)
1261{
1262 struct drm_i915_private *dev_priv = dev->dev_private;
1263 phys_addr_t gtt_bus_addr;
1264 int ret;
1265
1266 /* For Modern GENs the PTEs and register space are split in the BAR */
1267 gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
1268 (pci_resource_len(dev->pdev, 0) / 2);
1269
1270 dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
1271 if (!dev_priv->gtt.gsm) {
1272 DRM_ERROR("Failed to map the gtt page table\n");
1273 return -ENOMEM;
1274 }
1275
1276 ret = setup_scratch_page(dev);
1277 if (ret) {
1278 DRM_ERROR("Scratch setup failed\n");
1279 /* iounmap will also get called at remove, but meh */
1280 iounmap(dev_priv->gtt.gsm);
1281 }
1282
1283 return ret;
1284}
1285
1286/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
1287 * bits. When using advanced contexts each context stores its own PAT, but
1288 * writing this data shouldn't be harmful even in those cases. */
1289static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
1290{
1291#define GEN8_PPAT_UC (0<<0)
1292#define GEN8_PPAT_WC (1<<0)
1293#define GEN8_PPAT_WT (2<<0)
1294#define GEN8_PPAT_WB (3<<0)
1295#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
1296/* FIXME(BDW): Bspec is completely confused about cache control bits. */
1297#define GEN8_PPAT_LLC (1<<2)
1298#define GEN8_PPAT_LLCELLC (2<<2)
1299#define GEN8_PPAT_LLCeLLC (3<<2)
1300#define GEN8_PPAT_AGE(x) (x<<4)
1301#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
1302 uint64_t pat;
1303
1304 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
1305 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
1306 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
1307 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
1308 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
1309 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
1310 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
1311 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
1312
1313 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
1314 * write would work. */
1315 I915_WRITE(GEN8_PRIVATE_PAT, pat);
1316 I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
1317}
1318
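
GEN8_PPAT(i, x) just shifts each 8-bit PPAT entry into byte i of a 64-bit word, which is then split across the two halves of the register pair. A sketch packing the same eight entries, with the memtype in bits 1:0, cache target in bits 3:2, and age in bits 5:4 as in the table above:

#include <stdint.h>
#include <stdio.h>

#define PPAT(i, x) ((uint64_t)(x) << ((i) * 8))

int main(void)
{
	uint64_t pat = PPAT(0, 3 | (1 << 2))   /* WB, LLC */
		     | PPAT(1, 1 | (2 << 2))   /* WC, LLC+eLLC */
		     | PPAT(2, 2 | (2 << 2))   /* WT, LLC+eLLC */
		     | PPAT(3, 0);             /* UC */

	for (int i = 4; i < 8; i++)            /* WB LLC+eLLC, age = i - 4 */
		pat |= PPAT(i, 3 | (2 << 2) | ((i - 4) << 4));

	printf("lower: 0x%08x upper: 0x%08x\n",
	       (uint32_t)pat, (uint32_t)(pat >> 32));
	return 0;
}
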
1319static int gen8_gmch_probe(struct drm_device *dev,
1320 size_t *gtt_total,
1321 size_t *stolen,
1322 phys_addr_t *mappable_base,
1323 unsigned long *mappable_end)
1324{
1325 struct drm_i915_private *dev_priv = dev->dev_private;
1326 unsigned int gtt_size;
1327 u16 snb_gmch_ctl;
1328 int ret;
1329
1330 /* TODO: We're not aware of mappable constraints on gen8 yet */
1331 *mappable_base = pci_resource_start(dev->pdev, 2);
1332 *mappable_end = pci_resource_len(dev->pdev, 2);
1333
1334 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
1335 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
1336
1337 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1338
1339 *stolen = gen8_get_stolen_size(snb_gmch_ctl);
1340
1341 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
1342 *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
1343
1344 gen8_setup_private_ppat(dev_priv);
1345
1346 ret = ggtt_probe_common(dev, gtt_size);
1347
1348 dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
1349 dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
1350
1351 return ret;
1352}
1353
877static int gen6_gmch_probe(struct drm_device *dev, 1354static int gen6_gmch_probe(struct drm_device *dev,
878 size_t *gtt_total, 1355 size_t *gtt_total,
879 size_t *stolen, 1356 size_t *stolen,
@@ -881,7 +1358,6 @@ static int gen6_gmch_probe(struct drm_device *dev,
881 unsigned long *mappable_end) 1358 unsigned long *mappable_end)
882{ 1359{
883 struct drm_i915_private *dev_priv = dev->dev_private; 1360 struct drm_i915_private *dev_priv = dev->dev_private;
884 phys_addr_t gtt_bus_addr;
885 unsigned int gtt_size; 1361 unsigned int gtt_size;
886 u16 snb_gmch_ctl; 1362 u16 snb_gmch_ctl;
887 int ret; 1363 int ret;
@@ -901,24 +1377,13 @@ static int gen6_gmch_probe(struct drm_device *dev,
901 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) 1377 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
902 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); 1378 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
903 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 1379 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
904 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
905 1380
906 *stolen = gen6_get_stolen_size(snb_gmch_ctl); 1381 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
907 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
908
909 /* For Modern GENs the PTEs and register space are split in the BAR */
910 gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
911 (pci_resource_len(dev->pdev, 0) / 2);
912 1382
913 dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); 1383 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
914 if (!dev_priv->gtt.gsm) { 1384 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
915 DRM_ERROR("Failed to map the gtt page table\n");
916 return -ENOMEM;
917 }
918 1385
919 ret = setup_scratch_page(dev); 1386 ret = ggtt_probe_common(dev, gtt_size);
920 if (ret)
921 DRM_ERROR("Scratch setup failed\n");
922 1387
923 dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range; 1388 dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
924 dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries; 1389 dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
@@ -972,7 +1437,7 @@ int i915_gem_gtt_init(struct drm_device *dev)
972 if (INTEL_INFO(dev)->gen <= 5) { 1437 if (INTEL_INFO(dev)->gen <= 5) {
973 gtt->gtt_probe = i915_gmch_probe; 1438 gtt->gtt_probe = i915_gmch_probe;
974 gtt->base.cleanup = i915_gmch_remove; 1439 gtt->base.cleanup = i915_gmch_remove;
975 } else { 1440 } else if (INTEL_INFO(dev)->gen < 8) {
976 gtt->gtt_probe = gen6_gmch_probe; 1441 gtt->gtt_probe = gen6_gmch_probe;
977 gtt->base.cleanup = gen6_gmch_remove; 1442 gtt->base.cleanup = gen6_gmch_remove;
978 if (IS_HASWELL(dev) && dev_priv->ellc_size) 1443 if (IS_HASWELL(dev) && dev_priv->ellc_size)
@@ -985,6 +1450,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
985 gtt->base.pte_encode = ivb_pte_encode; 1450 gtt->base.pte_encode = ivb_pte_encode;
986 else 1451 else
987 gtt->base.pte_encode = snb_pte_encode; 1452 gtt->base.pte_encode = snb_pte_encode;
1453 } else {
1454 dev_priv->gtt.gtt_probe = gen8_gmch_probe;
1455 dev_priv->gtt.base.cleanup = gen6_gmch_remove;
988 } 1456 }
989 1457
990 ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size, 1458 ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index e15a1d90037d..d284d892ed94 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -395,7 +395,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
395 if (gtt_offset == I915_GTT_OFFSET_NONE) 395 if (gtt_offset == I915_GTT_OFFSET_NONE)
396 return obj; 396 return obj;
397 397
398 vma = i915_gem_vma_create(obj, ggtt); 398 vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
399 if (IS_ERR(vma)) { 399 if (IS_ERR(vma)) {
400 ret = PTR_ERR(vma); 400 ret = PTR_ERR(vma);
401 goto err_out; 401 goto err_out;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 032e9ef9c896..b13905348048 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
308 return -EINVAL; 308 return -EINVAL;
309 } 309 }
310 310
311 if (obj->pin_count) { 311 if (obj->pin_count || obj->framebuffer_references) {
312 drm_gem_object_unreference_unlocked(&obj->base); 312 drm_gem_object_unreference_unlocked(&obj->base);
313 return -EBUSY; 313 return -EBUSY;
314 } 314 }
@@ -393,7 +393,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
393 /* Try to preallocate memory required to save swizzling on put-pages */ 393 /* Try to preallocate memory required to save swizzling on put-pages */
394 if (i915_gem_object_needs_bit17_swizzle(obj)) { 394 if (i915_gem_object_needs_bit17_swizzle(obj)) {
395 if (obj->bit_17 == NULL) { 395 if (obj->bit_17 == NULL) {
396 obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) * 396 obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
397 sizeof(long), GFP_KERNEL); 397 sizeof(long), GFP_KERNEL);
398 } 398 }
399 } else { 399 } else {
@@ -504,8 +504,8 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
504 int i; 504 int i;
505 505
506 if (obj->bit_17 == NULL) { 506 if (obj->bit_17 == NULL) {
507 obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * 507 obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
508 sizeof(long), GFP_KERNEL); 508 sizeof(long), GFP_KERNEL);
509 if (obj->bit_17 == NULL) { 509 if (obj->bit_17 == NULL) {
510 DRM_ERROR("Failed to allocate memory for bit 17 " 510 DRM_ERROR("Failed to allocate memory for bit 17 "
511 "record\n"); 511 "record\n");
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index dae364f0028c..79dcb8f896c6 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -215,6 +215,24 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
215 } 215 }
216} 216}
217 217
218static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
219{
220 switch (a) {
221 case HANGCHECK_IDLE:
222 return "idle";
223 case HANGCHECK_WAIT:
224 return "wait";
225 case HANGCHECK_ACTIVE:
226 return "active";
227 case HANGCHECK_KICK:
228 return "kick";
229 case HANGCHECK_HUNG:
230 return "hung";
231 }
232
233 return "unknown";
234}
235
218static void i915_ring_error_state(struct drm_i915_error_state_buf *m, 236static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
219 struct drm_device *dev, 237 struct drm_device *dev,
220 struct drm_i915_error_state *error, 238 struct drm_i915_error_state *error,
@@ -231,7 +249,8 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
231 err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 249 err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
232 if (ring == RCS && INTEL_INFO(dev)->gen >= 4) 250 if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
233 err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); 251 err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
234 252 if (INTEL_INFO(dev)->gen >= 4)
253 err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]);
235 if (INTEL_INFO(dev)->gen >= 4) 254 if (INTEL_INFO(dev)->gen >= 4)
236 err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 255 err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
237 err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 256 err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -255,6 +274,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
255 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 274 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
256 err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 275 err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
257 err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); 276 err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
277 err_printf(m, " hangcheck: %s [%d]\n",
278 hangcheck_action_to_str(error->hangcheck_action[ring]),
279 error->hangcheck_score[ring]);
258} 280}
259 281
260void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) 282void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -283,13 +305,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
283 err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 305 err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
284 error->time.tv_usec); 306 error->time.tv_usec);
285 err_printf(m, "Kernel: " UTS_RELEASE "\n"); 307 err_printf(m, "Kernel: " UTS_RELEASE "\n");
286 err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 308 err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
287 err_printf(m, "EIR: 0x%08x\n", error->eir); 309 err_printf(m, "EIR: 0x%08x\n", error->eir);
288 err_printf(m, "IER: 0x%08x\n", error->ier); 310 err_printf(m, "IER: 0x%08x\n", error->ier);
289 err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 311 err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
290 err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); 312 err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
291 err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); 313 err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
292 err_printf(m, "CCID: 0x%08x\n", error->ccid); 314 err_printf(m, "CCID: 0x%08x\n", error->ccid);
315 err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
293 316
294 for (i = 0; i < dev_priv->num_fence_regs; i++) 317 for (i = 0; i < dev_priv->num_fence_regs; i++)
295 err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 318 err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
@@ -601,6 +624,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
601 624
602 /* Fences */ 625 /* Fences */
603 switch (INTEL_INFO(dev)->gen) { 626 switch (INTEL_INFO(dev)->gen) {
627 case 8:
604 case 7: 628 case 7:
605 case 6: 629 case 6:
606 for (i = 0; i < dev_priv->num_fence_regs; i++) 630 for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -703,6 +727,7 @@ static void i915_record_ring_state(struct drm_device *dev,
703 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 727 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
704 if (ring->id == RCS) 728 if (ring->id == RCS)
705 error->bbaddr = I915_READ64(BB_ADDR); 729 error->bbaddr = I915_READ64(BB_ADDR);
730 error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
706 } else { 731 } else {
707 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 732 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
708 error->ipeir[ring->id] = I915_READ(IPEIR); 733 error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -720,6 +745,9 @@ static void i915_record_ring_state(struct drm_device *dev,
720 745
721 error->cpu_ring_head[ring->id] = ring->head; 746 error->cpu_ring_head[ring->id] = ring->head;
722 error->cpu_ring_tail[ring->id] = ring->tail; 747 error->cpu_ring_tail[ring->id] = ring->tail;
748
749 error->hangcheck_score[ring->id] = ring->hangcheck.score;
750 error->hangcheck_action[ring->id] = ring->hangcheck.action;
723} 751}
724 752
725 753
@@ -769,7 +797,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
769 797
770 error->ring[i].num_requests = count; 798 error->ring[i].num_requests = count;
771 error->ring[i].requests = 799 error->ring[i].requests =
772 kmalloc(count*sizeof(struct drm_i915_error_request), 800 kcalloc(count, sizeof(*error->ring[i].requests),
773 GFP_ATOMIC); 801 GFP_ATOMIC);
774 if (error->ring[i].requests == NULL) { 802 if (error->ring[i].requests == NULL) {
775 error->ring[i].num_requests = 0; 803 error->ring[i].num_requests = 0;
@@ -811,7 +839,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
811 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; 839 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
812 840
813 if (i) { 841 if (i) {
814 active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC); 842 active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
815 if (active_bo) 843 if (active_bo)
816 pinned_bo = active_bo + error->active_bo_count[ndx]; 844 pinned_bo = active_bo + error->active_bo_count[ndx];
817 } 845 }
@@ -885,8 +913,12 @@ void i915_capture_error_state(struct drm_device *dev)
885 return; 913 return;
886 } 914 }
887 915
888 DRM_INFO("capturing error event; look for more information in " 916 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
889 "/sys/class/drm/card%d/error\n", dev->primary->index); 917 dev->primary->index);
918 DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
919 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
920 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
921 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
890 922
891 kref_init(&error->ref); 923 kref_init(&error->ref);
892 error->eir = I915_READ(EIR); 924 error->eir = I915_READ(EIR);
@@ -988,6 +1020,7 @@ const char *i915_cache_level_str(int type)
988 case I915_CACHE_NONE: return " uncached"; 1020 case I915_CACHE_NONE: return " uncached";
989 case I915_CACHE_LLC: return " snooped or LLC"; 1021 case I915_CACHE_LLC: return " snooped or LLC";
990 case I915_CACHE_L3_LLC: return " L3+LLC"; 1022 case I915_CACHE_L3_LLC: return " L3+LLC";
1023 case I915_CACHE_WT: return " WT";
991 default: return ""; 1024 default: return "";
992 } 1025 }
993} 1026}
@@ -1012,6 +1045,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
1012 default: 1045 default:
1013 WARN_ONCE(1, "Unsupported platform\n"); 1046 WARN_ONCE(1, "Unsupported platform\n");
1014 case 7: 1047 case 7:
1048 case 8:
1015 instdone[0] = I915_READ(GEN7_INSTDONE_1); 1049 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1016 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 1050 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1017 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 1051 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4b91228fd9bd..5d1dedc02f15 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -30,6 +30,7 @@
30 30
31#include <linux/sysrq.h> 31#include <linux/sysrq.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/circ_buf.h>
33#include <drm/drmP.h> 34#include <drm/drmP.h>
34#include <drm/i915_drm.h> 35#include <drm/i915_drm.h>
35#include "i915_drv.h" 36#include "i915_drv.h"
@@ -269,6 +270,21 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
269 } 270 }
270} 271}
271 272
273static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
274 enum pipe pipe, bool enable)
275{
276 struct drm_i915_private *dev_priv = dev->dev_private;
277
278 assert_spin_locked(&dev_priv->irq_lock);
279
280 if (enable)
281 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
282 else
283 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
284 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
285 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
286}
287
272/** 288/**
273 * ibx_display_interrupt_update - update SDEIMR 289 * ibx_display_interrupt_update - update SDEIMR
274 * @dev_priv: driver private 290 * @dev_priv: driver private
@@ -381,6 +397,8 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
381 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 397 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
382 else if (IS_GEN7(dev)) 398 else if (IS_GEN7(dev))
383 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable); 399 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
400 else if (IS_GEN8(dev))
401 broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
384 402
385done: 403done:
386 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 404 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -441,7 +459,7 @@ done:
441 459
442 460
443void 461void
444i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 462i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
445{ 463{
446 u32 reg = PIPESTAT(pipe); 464 u32 reg = PIPESTAT(pipe);
447 u32 pipestat = I915_READ(reg) & 0x7fff0000; 465 u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -458,7 +476,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
458} 476}
459 477
460void 478void
461i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 479i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
462{ 480{
463 u32 reg = PIPESTAT(pipe); 481 u32 reg = PIPESTAT(pipe);
464 u32 pipestat = I915_READ(reg) & 0x7fff0000; 482 u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -486,9 +504,10 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
486 504
487 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 505 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
488 506
489 i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE); 507 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
490 if (INTEL_INFO(dev)->gen >= 4) 508 if (INTEL_INFO(dev)->gen >= 4)
491 i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); 509 i915_enable_pipestat(dev_priv, PIPE_A,
510 PIPE_LEGACY_BLC_EVENT_ENABLE);
492 511
493 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 512 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
494} 513}
@@ -518,6 +537,12 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
518 } 537 }
519} 538}
520 539
540static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
541{
542 /* Gen2 doesn't have a hardware frame counter */
543 return 0;
544}
545
521/* Called from drm generic code, passed a 'crtc', which 546/* Called from drm generic code, passed a 'crtc', which
522 * we use as a pipe index 547 * we use as a pipe index
523 */ 548 */
@@ -526,7 +551,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
526 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 551 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
527 unsigned long high_frame; 552 unsigned long high_frame;
528 unsigned long low_frame; 553 unsigned long low_frame;
529 u32 high1, high2, low; 554 u32 high1, high2, low, pixel, vbl_start;
530 555
531 if (!i915_pipe_enabled(dev, pipe)) { 556 if (!i915_pipe_enabled(dev, pipe)) {
532 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 557 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -534,6 +559,24 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
534 return 0; 559 return 0;
535 } 560 }
536 561
562 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
563 struct intel_crtc *intel_crtc =
564 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
565 const struct drm_display_mode *mode =
566 &intel_crtc->config.adjusted_mode;
567
568 vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
569 } else {
570 enum transcoder cpu_transcoder =
571 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
572 u32 htotal;
573
574 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
575 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
576
577 vbl_start *= htotal;
578 }
579
537 high_frame = PIPEFRAME(pipe); 580 high_frame = PIPEFRAME(pipe);
538 low_frame = PIPEFRAMEPIXEL(pipe); 581 low_frame = PIPEFRAMEPIXEL(pipe);
539 582
@@ -544,13 +587,20 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
544 */ 587 */
545 do { 588 do {
546 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 589 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
547 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; 590 low = I915_READ(low_frame);
548 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 591 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
549 } while (high1 != high2); 592 } while (high1 != high2);
550 593
551 high1 >>= PIPE_FRAME_HIGH_SHIFT; 594 high1 >>= PIPE_FRAME_HIGH_SHIFT;
595 pixel = low & PIPE_PIXEL_MASK;
552 low >>= PIPE_FRAME_LOW_SHIFT; 596 low >>= PIPE_FRAME_LOW_SHIFT;
553 return (high1 << 8) | low; 597
598 /*
599 * The frame counter increments at beginning of active.
600 * Cook up a vblank counter by also checking the pixel
601 * counter against vblank start.
602 */
603 return ((high1 << 8) | low) + (pixel >= vbl_start);
554} 604}
555 605
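
The hardware frame counter ticks at the start of active video, after the vblank the callers care about, so the code nudges it forward by one whenever the pixel counter shows scanout has already crossed vblank start in the current frame. A sketch of that correction (the example mode numbers are illustrative):

#include <stdio.h>

static unsigned cooked_vblank(unsigned hw_frame, unsigned pixel,
			      unsigned vbl_start_px)
{
	/* +1 once the pixel counter passes vblank start in this frame */
	return hw_frame + (pixel >= vbl_start_px ? 1 : 0);
}

int main(void)
{
	unsigned vbl_start = 1084 * 2200; /* crtc_vblank_start * crtc_htotal */

	printf("%u\n", cooked_vblank(41, vbl_start - 1, vbl_start)); /* 41 */
	printf("%u\n", cooked_vblank(41, vbl_start, vbl_start));     /* 42 */
	return 0;
}
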
556static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 606static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@ -567,66 +617,163 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
567 return I915_READ(reg); 617 return I915_READ(reg);
568} 618}
569 619
620/* raw reads, only for fast reads of display block, no need for forcewake etc. */
621#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
622#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
623
624static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
625{
626 struct drm_i915_private *dev_priv = dev->dev_private;
627 uint32_t status;
628 int reg;
629
630 if (IS_VALLEYVIEW(dev)) {
631 status = pipe == PIPE_A ?
632 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
633 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
634
635 reg = VLV_ISR;
636 } else if (IS_GEN2(dev)) {
637 status = pipe == PIPE_A ?
638 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
639 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
640
641 reg = ISR;
642 } else if (INTEL_INFO(dev)->gen < 5) {
643 status = pipe == PIPE_A ?
644 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
645 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
646
647 reg = ISR;
648 } else if (INTEL_INFO(dev)->gen < 7) {
649 status = pipe == PIPE_A ?
650 DE_PIPEA_VBLANK :
651 DE_PIPEB_VBLANK;
652
653 reg = DEISR;
654 } else {
655 switch (pipe) {
656 default:
657 case PIPE_A:
658 status = DE_PIPEA_VBLANK_IVB;
659 break;
660 case PIPE_B:
661 status = DE_PIPEB_VBLANK_IVB;
662 break;
663 case PIPE_C:
664 status = DE_PIPEC_VBLANK_IVB;
665 break;
666 }
667
668 reg = DEISR;
669 }
670
671 if (IS_GEN2(dev))
672 return __raw_i915_read16(dev_priv, reg) & status;
673 else
674 return __raw_i915_read32(dev_priv, reg) & status;
675}
676
570static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 677static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
571 int *vpos, int *hpos) 678 int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
572{ 679{
573 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 680 struct drm_i915_private *dev_priv = dev->dev_private;
574 u32 vbl = 0, position = 0; 681 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
682 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
683 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
684 int position;
575 int vbl_start, vbl_end, htotal, vtotal; 685 int vbl_start, vbl_end, htotal, vtotal;
576 bool in_vbl = true; 686 bool in_vbl = true;
577 int ret = 0; 687 int ret = 0;
578 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 688 unsigned long irqflags;
579 pipe);
580 689
581 if (!i915_pipe_enabled(dev, pipe)) { 690 if (!intel_crtc->active) {
582 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 691 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
583 "pipe %c\n", pipe_name(pipe)); 692 "pipe %c\n", pipe_name(pipe));
584 return 0; 693 return 0;
585 } 694 }
586 695
587 /* Get vtotal. */ 696 htotal = mode->crtc_htotal;
588 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 697 vtotal = mode->crtc_vtotal;
698 vbl_start = mode->crtc_vblank_start;
699 vbl_end = mode->crtc_vblank_end;
589 700
590 if (INTEL_INFO(dev)->gen >= 4) { 701 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
702
703 /*
704 * Lock uncore.lock, as we will do multiple timing critical raw
705 * register reads, potentially with preemption disabled, so the
706 * following code must not block on uncore.lock.
707 */
708 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
709
710 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
711
712 /* Get optional system timestamp before query. */
713 if (stime)
714 *stime = ktime_get();
715
716 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
591 /* No obvious pixelcount register. Only query vertical 717 /* No obvious pixelcount register. Only query vertical
592 * scanout position from Display scan line register. 718 * scanout position from Display scan line register.
593 */ 719 */
594 position = I915_READ(PIPEDSL(pipe)); 720 if (IS_GEN2(dev))
721 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
722 else
723 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
595 724
596 /* Decode into vertical scanout position. Don't have 725 /*
597 * horizontal scanout position. 726 * The scanline counter increments at the leading edge
727 * of hsync, ie. it completely misses the active portion
728 * of the line. Fix up the counter at both edges of vblank
729 * to get a more accurate picture whether we're in vblank
730 * or not.
598 */ 731 */
599 *vpos = position & 0x1fff; 732 in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
600 *hpos = 0; 733 if ((in_vbl && position == vbl_start - 1) ||
734 (!in_vbl && position == vbl_end - 1))
735 position = (position + 1) % vtotal;
601 } else { 736 } else {
602 /* Have access to pixelcount since start of frame. 737 /* Have access to pixelcount since start of frame.
603 * We can split this into vertical and horizontal 738 * We can split this into vertical and horizontal
604 * scanout position. 739 * scanout position.
605 */ 740 */
606 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 741 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
607 742
608 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 743 /* convert to pixel counts */
609 *vpos = position / htotal; 744 vbl_start *= htotal;
610 *hpos = position - (*vpos * htotal); 745 vbl_end *= htotal;
746 vtotal *= htotal;
611 } 747 }
612 748
613 /* Query vblank area. */ 749 /* Get optional system timestamp after query. */
614 vbl = I915_READ(VBLANK(cpu_transcoder)); 750 if (etime)
751 *etime = ktime_get();
615 752
616 /* Test position against vblank region. */ 753 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
617 vbl_start = vbl & 0x1fff;
618 vbl_end = (vbl >> 16) & 0x1fff;
619 754
620 if ((*vpos < vbl_start) || (*vpos > vbl_end)) 755 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
621 in_vbl = false;
622 756
623 /* Inside "upper part" of vblank area? Apply corrective offset: */ 757 in_vbl = position >= vbl_start && position < vbl_end;
624 if (in_vbl && (*vpos >= vbl_start))
625 *vpos = *vpos - vtotal;
626 758
627 /* Readouts valid? */ 759 /*
628 if (vbl > 0) 760 * While in vblank, position will be negative
629 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 761 * counting up towards 0 at vbl_end. And outside
762 * vblank, position will be positive counting
763	 * up from vbl_end.
764 */
765 if (position >= vbl_start)
766 position -= vbl_end;
767 else
768 position += vtotal - vbl_end;
769
770 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
771 *vpos = position;
772 *hpos = 0;
773 } else {
774 *vpos = position / htotal;
775 *hpos = position - (*vpos * htotal);
776 }
630 777
631 /* In vblank? */ 778 /* In vblank? */
632 if (in_vbl) 779 if (in_vbl)
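
The normalization above maps the raw scanline into a signed position: inside vblank it counts up through negative values, reaching 0 at vbl_end, and outside vblank it counts up from 0. A standalone check of that mapping, using an example mode where vbl_end equals vtotal:

#include <stdio.h>

int main(void)
{
	int vtotal = 1125, vbl_start = 1084, vbl_end = 1125;
	int raws[] = { 0, 500, 1083, 1084, 1124 };

	for (int i = 0; i < 5; i++) {
		int raw = raws[i];
		/* same fixup as the driver applies to position */
		int pos = raw >= vbl_start ? raw - vbl_end
					   : raw + vtotal - vbl_end;
		printf("raw %4d -> pos %4d (%s)\n", raw, pos,
		       raw >= vbl_start && raw < vbl_end ? "vblank" : "active");
	}
	return 0;
}
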
@@ -665,7 +812,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
665 crtc); 812 crtc);
666} 813}
667 814
668static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector) 815static bool intel_hpd_irq_event(struct drm_device *dev,
816 struct drm_connector *connector)
669{ 817{
670 enum drm_connector_status old_status; 818 enum drm_connector_status old_status;
671 819
@@ -673,11 +821,16 @@ static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *con
673 old_status = connector->status; 821 old_status = connector->status;
674 822
675 connector->status = connector->funcs->detect(connector, false); 823 connector->status = connector->funcs->detect(connector, false);
676 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", 824 if (old_status == connector->status)
825 return false;
826
827 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
677 connector->base.id, 828 connector->base.id,
678 drm_get_connector_name(connector), 829 drm_get_connector_name(connector),
679 old_status, connector->status); 830 drm_get_connector_status_name(old_status),
680 return (old_status != connector->status); 831 drm_get_connector_status_name(connector->status));
832
833 return true;
681} 834}
682 835
683/* 836/*
@@ -801,7 +954,7 @@ static void notify_ring(struct drm_device *dev,
801 if (ring->obj == NULL) 954 if (ring->obj == NULL)
802 return; 955 return;
803 956
804 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); 957 trace_i915_gem_request_complete(ring);
805 958
806 wake_up_all(&ring->irq_queue); 959 wake_up_all(&ring->irq_queue);
807 i915_queue_hangcheck(dev); 960 i915_queue_hangcheck(dev);
@@ -812,7 +965,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
812 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 965 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
813 rps.work); 966 rps.work);
814 u32 pm_iir; 967 u32 pm_iir;
815 u8 new_delay; 968 int new_delay, adj;
816 969
817 spin_lock_irq(&dev_priv->irq_lock); 970 spin_lock_irq(&dev_priv->irq_lock);
818 pm_iir = dev_priv->rps.pm_iir; 971 pm_iir = dev_priv->rps.pm_iir;
@@ -829,40 +982,49 @@ static void gen6_pm_rps_work(struct work_struct *work)
829 982
830 mutex_lock(&dev_priv->rps.hw_lock); 983 mutex_lock(&dev_priv->rps.hw_lock);
831 984
985 adj = dev_priv->rps.last_adj;
832 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 986 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
833 new_delay = dev_priv->rps.cur_delay + 1; 987 if (adj > 0)
988 adj *= 2;
989 else
990 adj = 1;
991 new_delay = dev_priv->rps.cur_delay + adj;
834 992
835 /* 993 /*
836 * For better performance, jump directly 994 * For better performance, jump directly
837 * to RPe if we're below it. 995 * to RPe if we're below it.
838 */ 996 */
839 if (IS_VALLEYVIEW(dev_priv->dev) && 997 if (new_delay < dev_priv->rps.rpe_delay)
840 dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay) 998 new_delay = dev_priv->rps.rpe_delay;
999 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1000 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
841 new_delay = dev_priv->rps.rpe_delay; 1001 new_delay = dev_priv->rps.rpe_delay;
842 } else 1002 else
843 new_delay = dev_priv->rps.cur_delay - 1; 1003 new_delay = dev_priv->rps.min_delay;
1004 adj = 0;
1005 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1006 if (adj < 0)
1007 adj *= 2;
1008 else
1009 adj = -1;
1010 new_delay = dev_priv->rps.cur_delay + adj;
1011 } else { /* unknown event */
1012 new_delay = dev_priv->rps.cur_delay;
1013 }
844 1014
845 /* sysfs frequency interfaces may have snuck in while servicing the 1015 /* sysfs frequency interfaces may have snuck in while servicing the
846 * interrupt 1016 * interrupt
847 */ 1017 */
848 if (new_delay >= dev_priv->rps.min_delay && 1018 if (new_delay < (int)dev_priv->rps.min_delay)
849 new_delay <= dev_priv->rps.max_delay) { 1019 new_delay = dev_priv->rps.min_delay;
850 if (IS_VALLEYVIEW(dev_priv->dev)) 1020 if (new_delay > (int)dev_priv->rps.max_delay)
851 valleyview_set_rps(dev_priv->dev, new_delay); 1021 new_delay = dev_priv->rps.max_delay;
852 else 1022 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
853 gen6_set_rps(dev_priv->dev, new_delay); 1023
854 } 1024 if (IS_VALLEYVIEW(dev_priv->dev))
855 1025 valleyview_set_rps(dev_priv->dev, new_delay);
856 if (IS_VALLEYVIEW(dev_priv->dev)) { 1026 else
857 /* 1027 gen6_set_rps(dev_priv->dev, new_delay);
858 * On VLV, when we enter RC6 we may not be at the minimum
859 * voltage level, so arm a timer to check. It should only
860 * fire when there's activity or once after we've entered
861 * RC6, and then won't be re-armed until the next RPS interrupt.
862 */
863 mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
864 msecs_to_jiffies(100));
865 }
866 1028
867 mutex_unlock(&dev_priv->rps.hw_lock); 1029 mutex_unlock(&dev_priv->rps.hw_lock);
868} 1030}
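
The reworked RPS logic keeps a last_adj momentum term: consecutive up (or down) threshold interrupts double the step, a down-timeout resets it and drops straight to RPe or the minimum, and the result is clamped to [min, max]. A sketch of the step policy in isolation; the event encoding is illustrative, not the hardware's:

#include <stdio.h>

enum ev { UP, DOWN, TIMEOUT };

static int step(int cur, int *adj, enum ev e, int min, int max, int rpe)
{
	int next;

	if (e == UP) {
		*adj = *adj > 0 ? *adj * 2 : 1;  /* build upward momentum */
		next = cur + *adj;
		if (next < rpe)
			next = rpe;              /* jump straight to RPe */
	} else if (e == TIMEOUT) {
		next = cur > rpe ? rpe : min;    /* idle: fall to RPe/min */
		*adj = 0;
	} else {
		*adj = *adj < 0 ? *adj * 2 : -1; /* downward momentum */
		next = cur + *adj;
	}
	if (next < min) next = min;
	if (next > max) next = max;
	*adj = next - cur;                       /* remember the applied step */
	return next;
}

int main(void)
{
	int cur = 6, adj = 0;
	enum ev evs[] = { UP, UP, UP, DOWN, TIMEOUT };

	for (int i = 0; i < 5; i++) {
		cur = step(cur, &adj, evs[i], 3, 16, 8);
		printf("freq %d (adj %d)\n", cur, adj);
	}
	return 0;
}
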
@@ -882,9 +1044,10 @@ static void ivybridge_parity_work(struct work_struct *work)
882 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 1044 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
883 l3_parity.error_work); 1045 l3_parity.error_work);
884 u32 error_status, row, bank, subbank; 1046 u32 error_status, row, bank, subbank;
885 char *parity_event[5]; 1047 char *parity_event[6];
886 uint32_t misccpctl; 1048 uint32_t misccpctl;
887 unsigned long flags; 1049 unsigned long flags;
1050 uint8_t slice = 0;
888 1051
889 /* We must turn off DOP level clock gating to access the L3 registers. 1052 /* We must turn off DOP level clock gating to access the L3 registers.
890 * In order to prevent a get/put style interface, acquire struct mutex 1053 * In order to prevent a get/put style interface, acquire struct mutex
@@ -892,55 +1055,81 @@ static void ivybridge_parity_work(struct work_struct *work)
892 */ 1055 */
893 mutex_lock(&dev_priv->dev->struct_mutex); 1056 mutex_lock(&dev_priv->dev->struct_mutex);
894 1057
1058 /* If we've screwed up tracking, just let the interrupt fire again */
1059 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1060 goto out;
1061
895 misccpctl = I915_READ(GEN7_MISCCPCTL); 1062 misccpctl = I915_READ(GEN7_MISCCPCTL);
896 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1063 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
897 POSTING_READ(GEN7_MISCCPCTL); 1064 POSTING_READ(GEN7_MISCCPCTL);
898 1065
899 error_status = I915_READ(GEN7_L3CDERRST1); 1066 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
900 row = GEN7_PARITY_ERROR_ROW(error_status); 1067 u32 reg;
901 bank = GEN7_PARITY_ERROR_BANK(error_status);
902 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
903 1068
904 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | 1069 slice--;
905 GEN7_L3CDERRST1_ENABLE); 1070 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
906 POSTING_READ(GEN7_L3CDERRST1); 1071 break;
907 1072
908 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1073 dev_priv->l3_parity.which_slice &= ~(1<<slice);
909 1074
910 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1075 reg = GEN7_L3CDERRST1 + (slice * 0x200);
911 ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
912 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
913 1076
914 mutex_unlock(&dev_priv->dev->struct_mutex); 1077 error_status = I915_READ(reg);
1078 row = GEN7_PARITY_ERROR_ROW(error_status);
1079 bank = GEN7_PARITY_ERROR_BANK(error_status);
1080 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
915 1081
916 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1082 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
917 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1083 POSTING_READ(reg);
918 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1084
919 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1085 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
920 parity_event[4] = NULL; 1086 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1087 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1088 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1089 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1090 parity_event[5] = NULL;
1091
1092 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1093 KOBJ_CHANGE, parity_event);
1094
1095 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1096 slice, row, bank, subbank);
1097
1098 kfree(parity_event[4]);
1099 kfree(parity_event[3]);
1100 kfree(parity_event[2]);
1101 kfree(parity_event[1]);
1102 }
921 1103
922 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, 1104 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
923 KOBJ_CHANGE, parity_event);
924 1105
925 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", 1106out:
926 row, bank, subbank); 1107 WARN_ON(dev_priv->l3_parity.which_slice);
1108 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1109 ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1110 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
927 1111
928 kfree(parity_event[3]); 1112 mutex_unlock(&dev_priv->dev->struct_mutex);
929 kfree(parity_event[2]);
930 kfree(parity_event[1]);
931} 1113}
932 1114
933static void ivybridge_parity_error_irq_handler(struct drm_device *dev) 1115static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
934{ 1116{
935 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1117 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
936 1118
937 if (!HAS_L3_GPU_CACHE(dev)) 1119 if (!HAS_L3_DPF(dev))
938 return; 1120 return;
939 1121
940 spin_lock(&dev_priv->irq_lock); 1122 spin_lock(&dev_priv->irq_lock);
941 ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 1123 ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
942 spin_unlock(&dev_priv->irq_lock); 1124 spin_unlock(&dev_priv->irq_lock);
943 1125
1126 iir &= GT_PARITY_ERROR(dev);
1127 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1128 dev_priv->l3_parity.which_slice |= 1 << 1;
1129
1130 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1131 dev_priv->l3_parity.which_slice |= 1 << 0;
1132
944 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1133 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
945} 1134}
946 1135
@@ -975,8 +1164,58 @@ static void snb_gt_irq_handler(struct drm_device *dev,
975 i915_handle_error(dev, false); 1164 i915_handle_error(dev, false);
976 } 1165 }
977 1166
978 if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1167 if (gt_iir & GT_PARITY_ERROR(dev))
979 ivybridge_parity_error_irq_handler(dev); 1168 ivybridge_parity_error_irq_handler(dev, gt_iir);
1169}
1170
1171static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1172 struct drm_i915_private *dev_priv,
1173 u32 master_ctl)
1174{
1175 u32 rcs, bcs, vcs;
1176 uint32_t tmp = 0;
1177 irqreturn_t ret = IRQ_NONE;
1178
1179 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1180 tmp = I915_READ(GEN8_GT_IIR(0));
1181 if (tmp) {
1182 ret = IRQ_HANDLED;
1183 rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1184 bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1185 if (rcs & GT_RENDER_USER_INTERRUPT)
1186 notify_ring(dev, &dev_priv->ring[RCS]);
1187 if (bcs & GT_RENDER_USER_INTERRUPT)
1188 notify_ring(dev, &dev_priv->ring[BCS]);
1189 I915_WRITE(GEN8_GT_IIR(0), tmp);
1190 } else
1191 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1192 }
1193
1194 if (master_ctl & GEN8_GT_VCS1_IRQ) {
1195 tmp = I915_READ(GEN8_GT_IIR(1));
1196 if (tmp) {
1197 ret = IRQ_HANDLED;
1198 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1199 if (vcs & GT_RENDER_USER_INTERRUPT)
1200 notify_ring(dev, &dev_priv->ring[VCS]);
1201 I915_WRITE(GEN8_GT_IIR(1), tmp);
1202 } else
1203 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1204 }
1205
1206 if (master_ctl & GEN8_GT_VECS_IRQ) {
1207 tmp = I915_READ(GEN8_GT_IIR(3));
1208 if (tmp) {
1209 ret = IRQ_HANDLED;
1210 vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1211 if (vcs & GT_RENDER_USER_INTERRUPT)
1212 notify_ring(dev, &dev_priv->ring[VECS]);
1213 I915_WRITE(GEN8_GT_IIR(3), tmp);
1214 } else
1215 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1216 }
1217
1218 return ret;
980} 1219}
981 1220
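gen8 packs two engines into each 32-bit GT IIR bank, which is why the handler shifts the bank value by a per-engine offset before testing the shared GT_RENDER_USER_INTERRUPT bit. A sketch of the access pattern, assuming the GEN8_*_IRQ_SHIFT values place each engine's bits in its own half-word:

	/* Sketch: test one engine's user-interrupt bit inside a shared bank. */
	static bool engine_user_irq_pending(u32 bank_iir, unsigned int engine_shift)
	{
		return (bank_iir >> engine_shift) & GT_RENDER_USER_INTERRUPT;
	}

	/* e.g. engine_user_irq_pending(tmp, GEN8_BCS_IRQ_SHIFT) for the blitter */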
982#define HPD_STORM_DETECT_PERIOD 1000 1221#define HPD_STORM_DETECT_PERIOD 1000
@@ -1050,6 +1289,102 @@ static void dp_aux_irq_handler(struct drm_device *dev)
1050 wake_up_all(&dev_priv->gmbus_wait_queue); 1289 wake_up_all(&dev_priv->gmbus_wait_queue);
1051} 1290}
1052 1291
1292#if defined(CONFIG_DEBUG_FS)
1293static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1294 uint32_t crc0, uint32_t crc1,
1295 uint32_t crc2, uint32_t crc3,
1296 uint32_t crc4)
1297{
1298 struct drm_i915_private *dev_priv = dev->dev_private;
1299 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1300 struct intel_pipe_crc_entry *entry;
1301 int head, tail;
1302
1303 spin_lock(&pipe_crc->lock);
1304
1305 if (!pipe_crc->entries) {
1306 spin_unlock(&pipe_crc->lock);
1307 DRM_ERROR("spurious interrupt\n");
1308 return;
1309 }
1310
1311 head = pipe_crc->head;
1312 tail = pipe_crc->tail;
1313
1314 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1315 spin_unlock(&pipe_crc->lock);
1316 DRM_ERROR("CRC buffer overflowing\n");
1317 return;
1318 }
1319
1320 entry = &pipe_crc->entries[head];
1321
1322 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1323 entry->crc[0] = crc0;
1324 entry->crc[1] = crc1;
1325 entry->crc[2] = crc2;
1326 entry->crc[3] = crc3;
1327 entry->crc[4] = crc4;
1328
1329 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1330 pipe_crc->head = head;
1331
1332 spin_unlock(&pipe_crc->lock);
1333
1334 wake_up_interruptible(&pipe_crc->wq);
1335}
1336#else
1337static inline void
1338display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1339 uint32_t crc0, uint32_t crc1,
1340 uint32_t crc2, uint32_t crc3,
1341 uint32_t crc4) {}
1342#endif
1343
1344
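The handler above is the producer half of a ring buffer: CIRC_SPACE() and the `& (INTEL_PIPE_CRC_ENTRIES_NR - 1)` head advance both require the entry count to be a power of two, and one slot is always left free to distinguish full from empty. The core discipline, reduced to a sketch with the struct fields and entry count assumed from the driver:

	#include <linux/circ_buf.h>

	#define ENTRIES_NR 128				/* must be a power of two */

	/* Sketch: push one CRC entry; returns false when the ring is full. */
	static bool crc_ring_push(struct intel_pipe_crc *pipe_crc,
				  const struct intel_pipe_crc_entry *e)
	{
		int head = pipe_crc->head, tail = pipe_crc->tail;

		if (CIRC_SPACE(head, tail, ENTRIES_NR) < 1)
			return false;		/* consumer has fallen behind */

		pipe_crc->entries[head] = *e;
		pipe_crc->head = (head + 1) & (ENTRIES_NR - 1);
		return true;
	}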
1345static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1346{
1347 struct drm_i915_private *dev_priv = dev->dev_private;
1348
1349 display_pipe_crc_irq_handler(dev, pipe,
1350 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1351 0, 0, 0, 0);
1352}
1353
1354static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1355{
1356 struct drm_i915_private *dev_priv = dev->dev_private;
1357
1358 display_pipe_crc_irq_handler(dev, pipe,
1359 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1360 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1361 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1362 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1363 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1364}
1365
1366static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1367{
1368 struct drm_i915_private *dev_priv = dev->dev_private;
1369 uint32_t res1, res2;
1370
1371 if (INTEL_INFO(dev)->gen >= 3)
1372 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1373 else
1374 res1 = 0;
1375
1376 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1377 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1378 else
1379 res2 = 0;
1380
1381 display_pipe_crc_irq_handler(dev, pipe,
1382 I915_READ(PIPE_CRC_RES_RED(pipe)),
1383 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1384 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1385 res1, res2);
1386}
1387
1053/* The RPS events need forcewake, so we add them to a work queue and mask their 1388/* The RPS events need forcewake, so we add them to a work queue and mask their
1054 * IMR bits until the work is done. Other interrupts can be processed without 1389 * IMR bits until the work is done. Other interrupts can be processed without
1055 * the work queue. */ 1390 * the work queue. */
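The comment describes the usual top-half/bottom-half split: the hard IRQ handler masks the expensive event in IMR and queues a work item, and the work item takes forcewake, processes the event, and unmasks again. Stripped of detail, the pattern looks like this; the helper and field names are assumed from elsewhere in the driver:

	/* Sketch: top half masks the expensive event and defers it. */
	static void defer_expensive_event(struct drm_i915_private *dev_priv, u32 pm_iir)
	{
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir;		/* remember what fired */
		snb_disable_pm_irq(dev_priv, pm_iir);	/* mask in IMR until done */
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
		/* the work function takes forcewake, handles, then unmasks */
	}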
@@ -1117,13 +1452,16 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1117 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1452 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1118 1453
1119 for_each_pipe(pipe) { 1454 for_each_pipe(pipe) {
1120 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1455 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1121 drm_handle_vblank(dev, pipe); 1456 drm_handle_vblank(dev, pipe);
1122 1457
1123 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 1458 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
1124 intel_prepare_page_flip(dev, pipe); 1459 intel_prepare_page_flip(dev, pipe);
1125 intel_finish_page_flip(dev, pipe); 1460 intel_finish_page_flip(dev, pipe);
1126 } 1461 }
1462
1463 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1464 i9xx_pipe_crc_irq_handler(dev, pipe);
1127 } 1465 }
1128 1466
1129 /* Consume port. Then clear IIR or we'll miss events */ 1467 /* Consume port. Then clear IIR or we'll miss events */
@@ -1212,21 +1550,26 @@ static void ivb_err_int_handler(struct drm_device *dev)
1212{ 1550{
1213 struct drm_i915_private *dev_priv = dev->dev_private; 1551 struct drm_i915_private *dev_priv = dev->dev_private;
1214 u32 err_int = I915_READ(GEN7_ERR_INT); 1552 u32 err_int = I915_READ(GEN7_ERR_INT);
1553 enum pipe pipe;
1215 1554
1216 if (err_int & ERR_INT_POISON) 1555 if (err_int & ERR_INT_POISON)
1217 DRM_ERROR("Poison interrupt\n"); 1556 DRM_ERROR("Poison interrupt\n");
1218 1557
1219 if (err_int & ERR_INT_FIFO_UNDERRUN_A) 1558 for_each_pipe(pipe) {
1220 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1559 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1221 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1560 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1222 1561 false))
1223 if (err_int & ERR_INT_FIFO_UNDERRUN_B) 1562 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1224 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 1563 pipe_name(pipe));
1225 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1564 }
1226 1565
1227 if (err_int & ERR_INT_FIFO_UNDERRUN_C) 1566 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1228 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false)) 1567 if (IS_IVYBRIDGE(dev))
1229 DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n"); 1568 ivb_pipe_crc_irq_handler(dev, pipe);
1569 else
1570 hsw_pipe_crc_irq_handler(dev, pipe);
1571 }
1572 }
1230 1573
1231 I915_WRITE(GEN7_ERR_INT, err_int); 1574 I915_WRITE(GEN7_ERR_INT, err_int);
1232} 1575}
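The loop works because, per the i915_reg.h half of this patch, GEN7_ERR_INT allocates three bits per pipe: FIFO underrun at bit pipe*3 and CRC done at bit 2 + pipe*3. A quick consistency check against the fixed per-pipe names, e.g. dropped into any init function:

	/* Sketch: the parameterized macros must match the fixed A/B/C bits. */
	BUILD_BUG_ON(ERR_INT_FIFO_UNDERRUN(PIPE_A) != ERR_INT_FIFO_UNDERRUN_A);	/* 1<<0 */
	BUILD_BUG_ON(ERR_INT_FIFO_UNDERRUN(PIPE_B) != ERR_INT_FIFO_UNDERRUN_B);	/* 1<<3 */
	BUILD_BUG_ON(ERR_INT_PIPE_CRC_DONE(PIPE_C) != ERR_INT_PIPE_CRC_DONE_C);	/* 1<<8 */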
@@ -1297,6 +1640,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1297static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1640static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1298{ 1641{
1299 struct drm_i915_private *dev_priv = dev->dev_private; 1642 struct drm_i915_private *dev_priv = dev->dev_private;
1643 enum pipe pipe;
1300 1644
1301 if (de_iir & DE_AUX_CHANNEL_A) 1645 if (de_iir & DE_AUX_CHANNEL_A)
1302 dp_aux_irq_handler(dev); 1646 dp_aux_irq_handler(dev);
@@ -1304,31 +1648,26 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1304 if (de_iir & DE_GSE) 1648 if (de_iir & DE_GSE)
1305 intel_opregion_asle_intr(dev); 1649 intel_opregion_asle_intr(dev);
1306 1650
1307 if (de_iir & DE_PIPEA_VBLANK)
1308 drm_handle_vblank(dev, 0);
1309
1310 if (de_iir & DE_PIPEB_VBLANK)
1311 drm_handle_vblank(dev, 1);
1312
1313 if (de_iir & DE_POISON) 1651 if (de_iir & DE_POISON)
1314 DRM_ERROR("Poison interrupt\n"); 1652 DRM_ERROR("Poison interrupt\n");
1315 1653
1316 if (de_iir & DE_PIPEA_FIFO_UNDERRUN) 1654 for_each_pipe(pipe) {
1317 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1655 if (de_iir & DE_PIPE_VBLANK(pipe))
1318 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1656 drm_handle_vblank(dev, pipe);
1319 1657
1320 if (de_iir & DE_PIPEB_FIFO_UNDERRUN) 1658 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1321 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 1659 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1322 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1660 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1661 pipe_name(pipe));
1323 1662
1324 if (de_iir & DE_PLANEA_FLIP_DONE) { 1663 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1325 intel_prepare_page_flip(dev, 0); 1664 i9xx_pipe_crc_irq_handler(dev, pipe);
1326 intel_finish_page_flip_plane(dev, 0);
1327 }
1328 1665
1329 if (de_iir & DE_PLANEB_FLIP_DONE) { 1666 /* plane/pipes map 1:1 on ilk+ */
1330 intel_prepare_page_flip(dev, 1); 1667 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1331 intel_finish_page_flip_plane(dev, 1); 1668 intel_prepare_page_flip(dev, pipe);
1669 intel_finish_page_flip_plane(dev, pipe);
1670 }
1332 } 1671 }
1333 1672
1334 /* check event from PCH */ 1673 /* check event from PCH */
@@ -1351,7 +1690,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1351static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 1690static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1352{ 1691{
1353 struct drm_i915_private *dev_priv = dev->dev_private; 1692 struct drm_i915_private *dev_priv = dev->dev_private;
1354 int i; 1693 enum pipe i;
1355 1694
1356 if (de_iir & DE_ERR_INT_IVB) 1695 if (de_iir & DE_ERR_INT_IVB)
1357 ivb_err_int_handler(dev); 1696 ivb_err_int_handler(dev);
@@ -1362,10 +1701,12 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1362 if (de_iir & DE_GSE_IVB) 1701 if (de_iir & DE_GSE_IVB)
1363 intel_opregion_asle_intr(dev); 1702 intel_opregion_asle_intr(dev);
1364 1703
1365 for (i = 0; i < 3; i++) { 1704 for_each_pipe(i) {
1366 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 1705 if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
1367 drm_handle_vblank(dev, i); 1706 drm_handle_vblank(dev, i);
1368 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 1707
1708 /* plane/pipes map 1:1 on ilk+ */
1709 if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
1369 intel_prepare_page_flip(dev, i); 1710 intel_prepare_page_flip(dev, i);
1370 intel_finish_page_flip_plane(dev, i); 1711 intel_finish_page_flip_plane(dev, i);
1371 } 1712 }
@@ -1388,7 +1729,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1388 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1729 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1389 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 1730 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1390 irqreturn_t ret = IRQ_NONE; 1731 irqreturn_t ret = IRQ_NONE;
1391 bool err_int_reenable = false;
1392 1732
1393 atomic_inc(&dev_priv->irq_received); 1733 atomic_inc(&dev_priv->irq_received);
1394 1734
@@ -1412,17 +1752,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1412 POSTING_READ(SDEIER); 1752 POSTING_READ(SDEIER);
1413 } 1753 }
1414 1754
1415 /* On Haswell, also mask ERR_INT because we don't want to risk
1416 * generating "unclaimed register" interrupts from inside the interrupt
1417 * handler. */
1418 if (IS_HASWELL(dev)) {
1419 spin_lock(&dev_priv->irq_lock);
1420 err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
1421 if (err_int_reenable)
1422 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
1423 spin_unlock(&dev_priv->irq_lock);
1424 }
1425
1426 gt_iir = I915_READ(GTIIR); 1755 gt_iir = I915_READ(GTIIR);
1427 if (gt_iir) { 1756 if (gt_iir) {
1428 if (INTEL_INFO(dev)->gen >= 6) 1757 if (INTEL_INFO(dev)->gen >= 6)
@@ -1452,13 +1781,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1452 } 1781 }
1453 } 1782 }
1454 1783
1455 if (err_int_reenable) {
1456 spin_lock(&dev_priv->irq_lock);
1457 if (ivb_can_enable_err_int(dev))
1458 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
1459 spin_unlock(&dev_priv->irq_lock);
1460 }
1461
1462 I915_WRITE(DEIER, de_ier); 1784 I915_WRITE(DEIER, de_ier);
1463 POSTING_READ(DEIER); 1785 POSTING_READ(DEIER);
1464 if (!HAS_PCH_NOP(dev)) { 1786 if (!HAS_PCH_NOP(dev)) {
@@ -1469,6 +1791,117 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1469 return ret; 1791 return ret;
1470} 1792}
1471 1793
1794static irqreturn_t gen8_irq_handler(int irq, void *arg)
1795{
1796 struct drm_device *dev = arg;
1797 struct drm_i915_private *dev_priv = dev->dev_private;
1798 u32 master_ctl;
1799 irqreturn_t ret = IRQ_NONE;
1800 uint32_t tmp = 0;
1801 enum pipe pipe;
1802
1803 atomic_inc(&dev_priv->irq_received);
1804
1805 master_ctl = I915_READ(GEN8_MASTER_IRQ);
1806 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
1807 if (!master_ctl)
1808 return IRQ_NONE;
1809
1810 I915_WRITE(GEN8_MASTER_IRQ, 0);
1811 POSTING_READ(GEN8_MASTER_IRQ);
1812
1813 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
1814
1815 if (master_ctl & GEN8_DE_MISC_IRQ) {
1816 tmp = I915_READ(GEN8_DE_MISC_IIR);
1817 if (tmp & GEN8_DE_MISC_GSE)
1818 intel_opregion_asle_intr(dev);
1819 else if (tmp)
1820 DRM_ERROR("Unexpected DE Misc interrupt\n");
1821 else
1822 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
1823
1824 if (tmp) {
1825 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
1826 ret = IRQ_HANDLED;
1827 }
1828 }
1829
1830 if (master_ctl & GEN8_DE_PORT_IRQ) {
1831 tmp = I915_READ(GEN8_DE_PORT_IIR);
1832 if (tmp & GEN8_AUX_CHANNEL_A)
1833 dp_aux_irq_handler(dev);
1834 else if (tmp)
1835 DRM_ERROR("Unexpected DE Port interrupt\n");
1836 else
1837 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
1838
1839 if (tmp) {
1840 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
1841 ret = IRQ_HANDLED;
1842 }
1843 }
1844
1845 for_each_pipe(pipe) {
1846 uint32_t pipe_iir;
1847
1848 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
1849 continue;
1850
1851 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
1852 if (pipe_iir & GEN8_PIPE_VBLANK)
1853 drm_handle_vblank(dev, pipe);
1854
1855 if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
1856 intel_prepare_page_flip(dev, pipe);
1857 intel_finish_page_flip_plane(dev, pipe);
1858 }
1859
1860 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
1861 hsw_pipe_crc_irq_handler(dev, pipe);
1862
1863 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
1864 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1865 false))
1866 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1867 pipe_name(pipe));
1868 }
1869
1870 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
                                                                                1871			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
1872 pipe_name(pipe),
1873 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
1874 }
1875
1876 if (pipe_iir) {
1877 ret = IRQ_HANDLED;
1878 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
1879 } else
1880 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
1881 }
1882
1883 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
1884 /*
1885 * FIXME(BDW): Assume for now that the new interrupt handling
1886 * scheme also closed the SDE interrupt handling race we've seen
1887 * on older pch-split platforms. But this needs testing.
1888 */
1889 u32 pch_iir = I915_READ(SDEIIR);
1890
1891 cpt_irq_handler(dev, pch_iir);
1892
1893 if (pch_iir) {
1894 I915_WRITE(SDEIIR, pch_iir);
1895 ret = IRQ_HANDLED;
1896 }
1897 }
1898
1899 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1900 POSTING_READ(GEN8_MASTER_IRQ);
1901
1902 return ret;
1903}
1904
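gen8_irq_handler() introduces a single master gate: read GEN8_MASTER_IRQ, mask off the control bit, write 0 to close the gate (with a posting read), service each domain IIR the master bits point at, then re-arm by writing GEN8_MASTER_IRQ_CONTROL back. The skeleton, with the per-domain dispatch elided:

	/* Sketch of the master-gate discipline used above. */
	static irqreturn_t master_gate_irq(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;
		u32 master = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;

		if (!master)
			return IRQ_NONE;		/* not our interrupt */

		I915_WRITE(GEN8_MASTER_IRQ, 0);		/* close the gate */
		POSTING_READ(GEN8_MASTER_IRQ);

		/* ... dispatch on `master`, acking each domain's IIR ... */

		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);		/* gate re-armed */
		return IRQ_HANDLED;
	}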
1472static void i915_error_wake_up(struct drm_i915_private *dev_priv, 1905static void i915_error_wake_up(struct drm_i915_private *dev_priv,
1473 bool reset_completed) 1906 bool reset_completed)
1474{ 1907{
@@ -1516,7 +1949,7 @@ static void i915_error_work_func(struct work_struct *work)
1516 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 1949 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1517 int ret; 1950 int ret;
1518 1951
1519 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 1952 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
1520 1953
1521 /* 1954 /*
1522 * Note that there's only one work item which does gpu resets, so we 1955 * Note that there's only one work item which does gpu resets, so we
@@ -1530,7 +1963,7 @@ static void i915_error_work_func(struct work_struct *work)
1530 */ 1963 */
1531 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 1964 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
1532 DRM_DEBUG_DRIVER("resetting chip\n"); 1965 DRM_DEBUG_DRIVER("resetting chip\n");
1533 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 1966 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
1534 reset_event); 1967 reset_event);
1535 1968
1536 /* 1969 /*
@@ -1557,7 +1990,7 @@ static void i915_error_work_func(struct work_struct *work)
1557 smp_mb__before_atomic_inc(); 1990 smp_mb__before_atomic_inc();
1558 atomic_inc(&dev_priv->gpu_error.reset_counter); 1991 atomic_inc(&dev_priv->gpu_error.reset_counter);
1559 1992
1560 kobject_uevent_env(&dev->primary->kdev.kobj, 1993 kobject_uevent_env(&dev->primary->kdev->kobj,
1561 KOBJ_CHANGE, reset_done_event); 1994 KOBJ_CHANGE, reset_done_event);
1562 } else { 1995 } else {
1563 atomic_set(&error->reset_counter, I915_WEDGED); 1996 atomic_set(&error->reset_counter, I915_WEDGED);
@@ -1787,7 +2220,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1787 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2220 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1788 unsigned long irqflags; 2221 unsigned long irqflags;
1789 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2222 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1790 DE_PIPE_VBLANK_ILK(pipe); 2223 DE_PIPE_VBLANK(pipe);
1791 2224
1792 if (!i915_pipe_enabled(dev, pipe)) 2225 if (!i915_pipe_enabled(dev, pipe))
1793 return -EINVAL; 2226 return -EINVAL;
@@ -1810,7 +2243,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1810 2243
1811 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2244 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1812 imr = I915_READ(VLV_IMR); 2245 imr = I915_READ(VLV_IMR);
1813 if (pipe == 0) 2246 if (pipe == PIPE_A)
1814 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2247 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1815 else 2248 else
1816 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2249 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1822,6 +2255,22 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1822 return 0; 2255 return 0;
1823} 2256}
1824 2257
2258static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2259{
2260 struct drm_i915_private *dev_priv = dev->dev_private;
2261 unsigned long irqflags;
2262
2263 if (!i915_pipe_enabled(dev, pipe))
2264 return -EINVAL;
2265
2266 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2267 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2268 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2269 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2270 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2271 return 0;
2272}
2273
1825/* Called from drm generic code, passed 'crtc' which 2274/* Called from drm generic code, passed 'crtc' which
1826 * we use as a pipe index 2275 * we use as a pipe index
1827 */ 2276 */
@@ -1845,7 +2294,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1845 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2294 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1846 unsigned long irqflags; 2295 unsigned long irqflags;
1847 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2296 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1848 DE_PIPE_VBLANK_ILK(pipe); 2297 DE_PIPE_VBLANK(pipe);
1849 2298
1850 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2299 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1851 ironlake_disable_display_irq(dev_priv, bit); 2300 ironlake_disable_display_irq(dev_priv, bit);
@@ -1862,7 +2311,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1862 i915_disable_pipestat(dev_priv, pipe, 2311 i915_disable_pipestat(dev_priv, pipe,
1863 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2312 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1864 imr = I915_READ(VLV_IMR); 2313 imr = I915_READ(VLV_IMR);
1865 if (pipe == 0) 2314 if (pipe == PIPE_A)
1866 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2315 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1867 else 2316 else
1868 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2317 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1870,6 +2319,21 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1870 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2319 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1871} 2320}
1872 2321
2322static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2323{
2324 struct drm_i915_private *dev_priv = dev->dev_private;
2325 unsigned long irqflags;
2326
2327 if (!i915_pipe_enabled(dev, pipe))
2328 return;
2329
2330 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2331 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2332 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2333 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2334 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2335}
2336
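Both gen8 vblank hooks follow one rule: flip a single bit in the cached dev_priv->de_irq_mask[pipe] under irq_lock, write the whole word to the pipe IMR, and posting-read it so the cache and the hardware never disagree. Folded into one hypothetical helper:

	/* Sketch: single point of truth for the per-pipe DE interrupt mask. */
	static void gen8_update_pipe_irq(struct drm_i915_private *dev_priv,
					 enum pipe pipe, uint32_t bit, bool enable)
	{
		assert_spin_locked(&dev_priv->irq_lock);

		if (enable)
			dev_priv->de_irq_mask[pipe] &= ~bit;	/* unmask */
		else
			dev_priv->de_irq_mask[pipe] |= bit;	/* mask */

		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}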
1873static u32 2337static u32
1874ring_last_seqno(struct intel_ring_buffer *ring) 2338ring_last_seqno(struct intel_ring_buffer *ring)
1875{ 2339{
@@ -1965,6 +2429,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
1965 if (tmp & RING_WAIT) { 2429 if (tmp & RING_WAIT) {
1966 DRM_ERROR("Kicking stuck wait on %s\n", 2430 DRM_ERROR("Kicking stuck wait on %s\n",
1967 ring->name); 2431 ring->name);
2432 i915_handle_error(dev, false);
1968 I915_WRITE_CTL(ring, tmp); 2433 I915_WRITE_CTL(ring, tmp);
1969 return HANGCHECK_KICK; 2434 return HANGCHECK_KICK;
1970 } 2435 }
@@ -1976,6 +2441,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
1976 case 1: 2441 case 1:
1977 DRM_ERROR("Kicking stuck semaphore on %s\n", 2442 DRM_ERROR("Kicking stuck semaphore on %s\n",
1978 ring->name); 2443 ring->name);
2444 i915_handle_error(dev, false);
1979 I915_WRITE_CTL(ring, tmp); 2445 I915_WRITE_CTL(ring, tmp);
1980 return HANGCHECK_KICK; 2446 return HANGCHECK_KICK;
1981 case 0: 2447 case 0:
@@ -2021,12 +2487,21 @@ static void i915_hangcheck_elapsed(unsigned long data)
2021 2487
2022 if (ring->hangcheck.seqno == seqno) { 2488 if (ring->hangcheck.seqno == seqno) {
2023 if (ring_idle(ring, seqno)) { 2489 if (ring_idle(ring, seqno)) {
2490 ring->hangcheck.action = HANGCHECK_IDLE;
2491
2024 if (waitqueue_active(&ring->irq_queue)) { 2492 if (waitqueue_active(&ring->irq_queue)) {
2025 /* Issue a wake-up to catch stuck h/w. */ 2493 /* Issue a wake-up to catch stuck h/w. */
2026 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2494 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2027 ring->name); 2495 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2028 wake_up_all(&ring->irq_queue); 2496 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2029 ring->hangcheck.score += HUNG; 2497 ring->name);
2498 else
2499 DRM_INFO("Fake missed irq on %s\n",
2500 ring->name);
2501 wake_up_all(&ring->irq_queue);
2502 }
2503 /* Safeguard against driver failure */
2504 ring->hangcheck.score += BUSY;
2030 } else 2505 } else
2031 busy = false; 2506 busy = false;
2032 } else { 2507 } else {
@@ -2049,6 +2524,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
2049 acthd); 2524 acthd);
2050 2525
2051 switch (ring->hangcheck.action) { 2526 switch (ring->hangcheck.action) {
2527 case HANGCHECK_IDLE:
2052 case HANGCHECK_WAIT: 2528 case HANGCHECK_WAIT:
2053 break; 2529 break;
2054 case HANGCHECK_ACTIVE: 2530 case HANGCHECK_ACTIVE:
@@ -2064,6 +2540,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
2064 } 2540 }
2065 } 2541 }
2066 } else { 2542 } else {
2543 ring->hangcheck.action = HANGCHECK_ACTIVE;
2544
2067 /* Gradually reduce the count so that we catch DoS 2545 /* Gradually reduce the count so that we catch DoS
2068 * attempts across multiple batches. 2546 * attempts across multiple batches.
2069 */ 2547 */
@@ -2190,6 +2668,53 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
2190 POSTING_READ(VLV_IER); 2668 POSTING_READ(VLV_IER);
2191} 2669}
2192 2670
2671static void gen8_irq_preinstall(struct drm_device *dev)
2672{
2673 struct drm_i915_private *dev_priv = dev->dev_private;
2674 int pipe;
2675
2676 atomic_set(&dev_priv->irq_received, 0);
2677
2678 I915_WRITE(GEN8_MASTER_IRQ, 0);
2679 POSTING_READ(GEN8_MASTER_IRQ);
2680
2681 /* IIR can theoretically queue up two events. Be paranoid */
2682#define GEN8_IRQ_INIT_NDX(type, which) do { \
2683 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
2684 POSTING_READ(GEN8_##type##_IMR(which)); \
2685 I915_WRITE(GEN8_##type##_IER(which), 0); \
2686 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2687 POSTING_READ(GEN8_##type##_IIR(which)); \
2688 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2689 } while (0)
2690
2691#define GEN8_IRQ_INIT(type) do { \
2692 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
2693 POSTING_READ(GEN8_##type##_IMR); \
2694 I915_WRITE(GEN8_##type##_IER, 0); \
2695 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2696 POSTING_READ(GEN8_##type##_IIR); \
2697 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2698 } while (0)
2699
2700 GEN8_IRQ_INIT_NDX(GT, 0);
2701 GEN8_IRQ_INIT_NDX(GT, 1);
2702 GEN8_IRQ_INIT_NDX(GT, 2);
2703 GEN8_IRQ_INIT_NDX(GT, 3);
2704
2705 for_each_pipe(pipe) {
2706 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
2707 }
2708
2709 GEN8_IRQ_INIT(DE_PORT);
2710 GEN8_IRQ_INIT(DE_MISC);
2711 GEN8_IRQ_INIT(PCU);
2712#undef GEN8_IRQ_INIT
2713#undef GEN8_IRQ_INIT_NDX
2714
2715 POSTING_READ(GEN8_PCU_IIR);
2716}
2717
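Because IIR can hold a second event queued behind the one being cleared, GEN8_IRQ_INIT_NDX clears IIR twice. Expanded by hand for GT bank 0, the macro above is just:

	I915_WRITE(GEN8_GT_IMR(0), 0xffffffff);	/* mask every source */
	POSTING_READ(GEN8_GT_IMR(0));
	I915_WRITE(GEN8_GT_IER(0), 0);		/* disable delivery */
	I915_WRITE(GEN8_GT_IIR(0), 0xffffffff);	/* clear the latched event */
	POSTING_READ(GEN8_GT_IIR(0));
	I915_WRITE(GEN8_GT_IIR(0), 0xffffffff);	/* ... and the one behind it */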
2193static void ibx_hpd_irq_setup(struct drm_device *dev) 2718static void ibx_hpd_irq_setup(struct drm_device *dev)
2194{ 2719{
2195 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2720 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2254,10 +2779,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
2254 pm_irqs = gt_irqs = 0; 2779 pm_irqs = gt_irqs = 0;
2255 2780
2256 dev_priv->gt_irq_mask = ~0; 2781 dev_priv->gt_irq_mask = ~0;
2257 if (HAS_L3_GPU_CACHE(dev)) { 2782 if (HAS_L3_DPF(dev)) {
2258 /* L3 parity interrupt is always unmasked. */ 2783 /* L3 parity interrupt is always unmasked. */
2259 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2784 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
2260 gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2785 gt_irqs |= GT_PARITY_ERROR(dev);
2261 } 2786 }
2262 2787
2263 gt_irqs |= GT_RENDER_USER_INTERRUPT; 2788 gt_irqs |= GT_RENDER_USER_INTERRUPT;
@@ -2306,8 +2831,10 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
2306 } else { 2831 } else {
2307 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2832 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2308 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 2833 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2309 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | 2834 DE_AUX_CHANNEL_A |
2310 DE_PIPEA_FIFO_UNDERRUN | DE_POISON); 2835 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
2836 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
2837 DE_POISON);
2311 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; 2838 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2312 } 2839 }
2313 2840
@@ -2341,7 +2868,8 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2341{ 2868{
2342 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2869 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2343 u32 enable_mask; 2870 u32 enable_mask;
2344 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2871 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
2872 PIPE_CRC_DONE_ENABLE;
2345 unsigned long irqflags; 2873 unsigned long irqflags;
2346 2874
2347 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2875 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -2371,9 +2899,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2371 /* Interrupt setup is already guaranteed to be single-threaded, this is 2899 /* Interrupt setup is already guaranteed to be single-threaded, this is
2372 * just to make the assert_spin_locked check happy. */ 2900 * just to make the assert_spin_locked check happy. */
2373 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2901 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2374 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2902 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
2375 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2903 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
2376 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2904 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
2377 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2905 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2378 2906
2379 I915_WRITE(VLV_IIR, 0xffffffff); 2907 I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2392,6 +2920,117 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2392 return 0; 2920 return 0;
2393} 2921}
2394 2922
2923static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
2924{
2925 int i;
2926
2927 /* These are interrupts we'll toggle with the ring mask register */
2928 uint32_t gt_interrupts[] = {
2929 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
2930 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
2931 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
2932 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
2933 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
2934 0,
2935 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
2936 };
2937
2938 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
2939 u32 tmp = I915_READ(GEN8_GT_IIR(i));
2940 if (tmp)
2941 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
2942 i, tmp);
2943 I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
2944 I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
2945 }
2946 POSTING_READ(GEN8_GT_IER(0));
2947}
2948
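The gt_interrupts[] initializer is indexed by IIR bank, so it must stay in step with the reads in gen8_gt_irq_handler() above. The assumed layout:

	/*
	 * Assumed gen8 GT bank layout (matching the handler above):
	 *   GEN8_GT_IIR(0): RCS in the low half-word, BCS in the high half-word
	 *   GEN8_GT_IIR(1): VCS1 low, VCS2 high
	 *   GEN8_GT_IIR(2): PM/power events (left unprogrammed here)
	 *   GEN8_GT_IIR(3): VECS
	 */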
2949static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
2950{
2951 struct drm_device *dev = dev_priv->dev;
2952 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
2953 GEN8_PIPE_CDCLK_CRC_DONE |
2954 GEN8_PIPE_FIFO_UNDERRUN |
2955 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2956 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
2957 int pipe;
2958 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
2959 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
2960 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
2961
2962 for_each_pipe(pipe) {
2963 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2964 if (tmp)
2965 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
2966 pipe, tmp);
2967 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2968 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
2969 }
2970 POSTING_READ(GEN8_DE_PIPE_ISR(0));
2971
2972 I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
2973 I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
2974 POSTING_READ(GEN8_DE_PORT_IER);
2975}
2976
2977static int gen8_irq_postinstall(struct drm_device *dev)
2978{
2979 struct drm_i915_private *dev_priv = dev->dev_private;
2980
2981 gen8_gt_irq_postinstall(dev_priv);
2982 gen8_de_irq_postinstall(dev_priv);
2983
2984 ibx_irq_postinstall(dev);
2985
                                                                                2986	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2987 POSTING_READ(GEN8_MASTER_IRQ);
2988
2989 return 0;
2990}
2991
2992static void gen8_irq_uninstall(struct drm_device *dev)
2993{
2994 struct drm_i915_private *dev_priv = dev->dev_private;
2995 int pipe;
2996
2997 if (!dev_priv)
2998 return;
2999
3000 atomic_set(&dev_priv->irq_received, 0);
3001
3002 I915_WRITE(GEN8_MASTER_IRQ, 0);
3003
3004#define GEN8_IRQ_FINI_NDX(type, which) do { \
3005 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3006 I915_WRITE(GEN8_##type##_IER(which), 0); \
3007 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3008 } while (0)
3009
3010#define GEN8_IRQ_FINI(type) do { \
3011 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3012 I915_WRITE(GEN8_##type##_IER, 0); \
3013 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3014 } while (0)
3015
3016 GEN8_IRQ_FINI_NDX(GT, 0);
3017 GEN8_IRQ_FINI_NDX(GT, 1);
3018 GEN8_IRQ_FINI_NDX(GT, 2);
3019 GEN8_IRQ_FINI_NDX(GT, 3);
3020
3021 for_each_pipe(pipe) {
3022 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
3023 }
3024
3025 GEN8_IRQ_FINI(DE_PORT);
3026 GEN8_IRQ_FINI(DE_MISC);
3027 GEN8_IRQ_FINI(PCU);
3028#undef GEN8_IRQ_FINI
3029#undef GEN8_IRQ_FINI_NDX
3030
3031 POSTING_READ(GEN8_PCU_IIR);
3032}
3033
2395static void valleyview_irq_uninstall(struct drm_device *dev) 3034static void valleyview_irq_uninstall(struct drm_device *dev)
2396{ 3035{
2397 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3036 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2464,6 +3103,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
2464static int i8xx_irq_postinstall(struct drm_device *dev) 3103static int i8xx_irq_postinstall(struct drm_device *dev)
2465{ 3104{
2466 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3105 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3106 unsigned long irqflags;
2467 3107
2468 I915_WRITE16(EMR, 3108 I915_WRITE16(EMR,
2469 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3109 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -2484,6 +3124,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
2484 I915_USER_INTERRUPT); 3124 I915_USER_INTERRUPT);
2485 POSTING_READ16(IER); 3125 POSTING_READ16(IER);
2486 3126
3127 /* Interrupt setup is already guaranteed to be single-threaded, this is
3128 * just to make the assert_spin_locked check happy. */
3129 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3130 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3131 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
3132 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3133
2487 return 0; 3134 return 0;
2488} 3135}
2489 3136
@@ -2570,13 +3217,14 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2570 if (iir & I915_USER_INTERRUPT) 3217 if (iir & I915_USER_INTERRUPT)
2571 notify_ring(dev, &dev_priv->ring[RCS]); 3218 notify_ring(dev, &dev_priv->ring[RCS]);
2572 3219
2573 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 3220 for_each_pipe(pipe) {
2574 i8xx_handle_vblank(dev, 0, iir)) 3221 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2575 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); 3222 i8xx_handle_vblank(dev, pipe, iir))
3223 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
2576 3224
2577 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 3225 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2578 i8xx_handle_vblank(dev, 1, iir)) 3226 i9xx_pipe_crc_irq_handler(dev, pipe);
2579 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); 3227 }
2580 3228
2581 iir = new_iir; 3229 iir = new_iir;
2582 } 3230 }
@@ -2623,6 +3271,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
2623{ 3271{
2624 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3272 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2625 u32 enable_mask; 3273 u32 enable_mask;
3274 unsigned long irqflags;
2626 3275
2627 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3276 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2628 3277
@@ -2658,6 +3307,13 @@ static int i915_irq_postinstall(struct drm_device *dev)
2658 3307
2659 i915_enable_asle_pipestat(dev); 3308 i915_enable_asle_pipestat(dev);
2660 3309
3310 /* Interrupt setup is already guaranteed to be single-threaded, this is
3311 * just to make the assert_spin_locked check happy. */
3312 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3313 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3314 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
3315 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3316
2661 return 0; 3317 return 0;
2662} 3318}
2663 3319
@@ -2769,6 +3425,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
2769 3425
2770 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3426 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2771 blc_event = true; 3427 blc_event = true;
3428
3429 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3430 i9xx_pipe_crc_irq_handler(dev, pipe);
2772 } 3431 }
2773 3432
2774 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3433 if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -2867,7 +3526,9 @@ static int i965_irq_postinstall(struct drm_device *dev)
2867 /* Interrupt setup is already guaranteed to be single-threaded, this is 3526 /* Interrupt setup is already guaranteed to be single-threaded, this is
2868 * just to make the assert_spin_locked check happy. */ 3527 * just to make the assert_spin_locked check happy. */
2869 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3528 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2870 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 3529 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
3530 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3531 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
2871 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3532 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2872 3533
2873 /* 3534 /*
@@ -3013,6 +3674,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3013 3674
3014 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3675 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3015 blc_event = true; 3676 blc_event = true;
3677
3678 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3679 i9xx_pipe_crc_irq_handler(dev, pipe);
3016 } 3680 }
3017 3681
3018 3682
@@ -3122,18 +3786,21 @@ void intel_irq_init(struct drm_device *dev)
3122 3786
3123 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 3787 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3124 3788
3125 dev->driver->get_vblank_counter = i915_get_vblank_counter; 3789 if (IS_GEN2(dev)) {
3126 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 3790 dev->max_vblank_count = 0;
3127 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 3791 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
3792 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3128 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 3793 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3129 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 3794 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3795 } else {
3796 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3797 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3130 } 3798 }
3131 3799
3132 if (drm_core_check_feature(dev, DRIVER_MODESET)) 3800 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
3133 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 3801 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3134 else 3802 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3135 dev->driver->get_vblank_timestamp = NULL; 3803 }
3136 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3137 3804
3138 if (IS_VALLEYVIEW(dev)) { 3805 if (IS_VALLEYVIEW(dev)) {
3139 dev->driver->irq_handler = valleyview_irq_handler; 3806 dev->driver->irq_handler = valleyview_irq_handler;
@@ -3143,6 +3810,14 @@ void intel_irq_init(struct drm_device *dev)
3143 dev->driver->enable_vblank = valleyview_enable_vblank; 3810 dev->driver->enable_vblank = valleyview_enable_vblank;
3144 dev->driver->disable_vblank = valleyview_disable_vblank; 3811 dev->driver->disable_vblank = valleyview_disable_vblank;
3145 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3812 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3813 } else if (IS_GEN8(dev)) {
3814 dev->driver->irq_handler = gen8_irq_handler;
3815 dev->driver->irq_preinstall = gen8_irq_preinstall;
3816 dev->driver->irq_postinstall = gen8_irq_postinstall;
3817 dev->driver->irq_uninstall = gen8_irq_uninstall;
3818 dev->driver->enable_vblank = gen8_enable_vblank;
3819 dev->driver->disable_vblank = gen8_disable_vblank;
3820 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3146 } else if (HAS_PCH_SPLIT(dev)) { 3821 } else if (HAS_PCH_SPLIT(dev)) {
3147 dev->driver->irq_handler = ironlake_irq_handler; 3822 dev->driver->irq_handler = ironlake_irq_handler;
3148 dev->driver->irq_preinstall = ironlake_irq_preinstall; 3823 dev->driver->irq_preinstall = ironlake_irq_preinstall;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ef9b35479f01..f9eafb6ed523 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,6 +26,7 @@
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
29#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
29#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) 30#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
30 31
31#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 32#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
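The new _PIPE_INC complements _PIPE: _PIPE derives the per-pipe stride from a concrete A/B register pair, while _PIPE_INC takes the stride explicitly, which suits banks like the CRC registers below that step by a fixed 0x1000 per pipe. For instance, assuming a zero display_mmio_offset:

	/* PIPE_CRC_CTL(PIPE_A) == 0x60050; PIPE_CRC_CTL(PIPE_B) == 0x61050 */
	u32 reg = _PIPE_INC(PIPE_B, 0x60050, 0x1000);	/* -> 0x61050 */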
@@ -109,6 +110,9 @@
109#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) 110#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
110#define PP_DIR_DCLV_2G 0xffffffff 111#define PP_DIR_DCLV_2G 0xffffffff
111 112
113#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4))
114#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8)
115
112#define GAM_ECOCHK 0x4090 116#define GAM_ECOCHK 0x4090
113#define ECOCHK_SNB_BIT (1<<10) 117#define ECOCHK_SNB_BIT (1<<10)
114#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) 118#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
@@ -246,6 +250,7 @@
246#define MI_BATCH_NON_SECURE_HSW (1<<13) 250#define MI_BATCH_NON_SECURE_HSW (1<<13)
247#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) 251#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
248#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ 252#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
253#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
249#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ 254#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
250#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) 255#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
251#define MI_SEMAPHORE_UPDATE (1<<21) 256#define MI_SEMAPHORE_UPDATE (1<<21)
@@ -264,6 +269,11 @@
264#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */ 269#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
265#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */ 270#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
266#define MI_SEMAPHORE_SYNC_INVALID (3<<16) 271#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
272
273#define MI_PREDICATE_RESULT_2 (0x2214)
274#define LOWER_SLICE_ENABLED (1<<0)
275#define LOWER_SLICE_DISABLED (0<<0)
276
267/* 277/*
268 * 3D instructions used by the kernel 278 * 3D instructions used by the kernel
269 */ 279 */
@@ -346,12 +356,25 @@
346#define IOSF_PORT_PUNIT 0x4 356#define IOSF_PORT_PUNIT 0x4
347#define IOSF_PORT_NC 0x11 357#define IOSF_PORT_NC 0x11
348#define IOSF_PORT_DPIO 0x12 358#define IOSF_PORT_DPIO 0x12
359#define IOSF_PORT_GPIO_NC 0x13
360#define IOSF_PORT_CCK 0x14
361#define IOSF_PORT_CCU 0xA9
362#define IOSF_PORT_GPS_CORE 0x48
349#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104) 363#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
350#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108) 364#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
351 365
352#define PUNIT_OPCODE_REG_READ 6 366#define PUNIT_OPCODE_REG_READ 6
353#define PUNIT_OPCODE_REG_WRITE 7 367#define PUNIT_OPCODE_REG_WRITE 7
354 368
369#define PUNIT_REG_PWRGT_CTRL 0x60
370#define PUNIT_REG_PWRGT_STATUS 0x61
371#define PUNIT_CLK_GATE 1
372#define PUNIT_PWR_RESET 2
373#define PUNIT_PWR_GATE 3
374#define RENDER_PWRGT (PUNIT_PWR_GATE << 0)
375#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2)
376#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
377
355#define PUNIT_REG_GPU_LFM 0xd3 378#define PUNIT_REG_GPU_LFM 0xd3
356#define PUNIT_REG_GPU_FREQ_REQ 0xd4 379#define PUNIT_REG_GPU_FREQ_REQ 0xd4
357#define PUNIT_REG_GPU_FREQ_STS 0xd8 380#define PUNIT_REG_GPU_FREQ_STS 0xd8
@@ -372,6 +395,40 @@
372#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 395#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
373#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 396#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
374 397
                                                                                398/* VLV2 north clock (CCK) registers */
399#define CCK_FUSE_REG 0x8
400#define CCK_FUSE_HPLL_FREQ_MASK 0x3
401#define CCK_REG_DSI_PLL_FUSE 0x44
402#define CCK_REG_DSI_PLL_CONTROL 0x48
403#define DSI_PLL_VCO_EN (1 << 31)
404#define DSI_PLL_LDO_GATE (1 << 30)
405#define DSI_PLL_P1_POST_DIV_SHIFT 17
406#define DSI_PLL_P1_POST_DIV_MASK (0x1ff << 17)
407#define DSI_PLL_P2_MUX_DSI0_DIV2 (1 << 13)
408#define DSI_PLL_P3_MUX_DSI1_DIV2 (1 << 12)
409#define DSI_PLL_MUX_MASK (3 << 9)
410#define DSI_PLL_MUX_DSI0_DSIPLL (0 << 10)
411#define DSI_PLL_MUX_DSI0_CCK (1 << 10)
412#define DSI_PLL_MUX_DSI1_DSIPLL (0 << 9)
413#define DSI_PLL_MUX_DSI1_CCK (1 << 9)
414#define DSI_PLL_CLK_GATE_MASK (0xf << 5)
415#define DSI_PLL_CLK_GATE_DSI0_DSIPLL (1 << 8)
416#define DSI_PLL_CLK_GATE_DSI1_DSIPLL (1 << 7)
417#define DSI_PLL_CLK_GATE_DSI0_CCK (1 << 6)
418#define DSI_PLL_CLK_GATE_DSI1_CCK (1 << 5)
419#define DSI_PLL_LOCK (1 << 0)
420#define CCK_REG_DSI_PLL_DIVIDER 0x4c
421#define DSI_PLL_LFSR (1 << 31)
422#define DSI_PLL_FRACTION_EN (1 << 30)
423#define DSI_PLL_FRAC_COUNTER_SHIFT 27
424#define DSI_PLL_FRAC_COUNTER_MASK (7 << 27)
425#define DSI_PLL_USYNC_CNT_SHIFT 18
426#define DSI_PLL_USYNC_CNT_MASK (0x1ff << 18)
427#define DSI_PLL_N1_DIV_SHIFT 16
428#define DSI_PLL_N1_DIV_MASK (3 << 16)
429#define DSI_PLL_M1_DIV_SHIFT 0
430#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
431
375/* 432/*
376 * DPIO - a special bus for various display related registers to hide behind 433 * DPIO - a special bus for various display related registers to hide behind
377 * 434 *
@@ -387,11 +444,11 @@
387#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ 444#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
388#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ 445#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
389#define DPIO_SFR_BYPASS (1<<1) 446#define DPIO_SFR_BYPASS (1<<1)
390#define DPIO_RESET (1<<0) 447#define DPIO_CMNRST (1<<0)
391 448
392#define _DPIO_TX3_SWING_CTL4_A 0x690 449#define _DPIO_TX3_SWING_CTL4_A 0x690
393#define _DPIO_TX3_SWING_CTL4_B 0x2a90 450#define _DPIO_TX3_SWING_CTL4_B 0x2a90
394#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX_SWING_CTL4_A, \ 451#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
395 _DPIO_TX3_SWING_CTL4_B) 452 _DPIO_TX3_SWING_CTL4_B)
396 453
397/* 454/*
@@ -602,6 +659,9 @@
602#define ARB_MODE 0x04030 659#define ARB_MODE 0x04030
603#define ARB_MODE_SWIZZLE_SNB (1<<4) 660#define ARB_MODE_SWIZZLE_SNB (1<<4)
604#define ARB_MODE_SWIZZLE_IVB (1<<5) 661#define ARB_MODE_SWIZZLE_IVB (1<<5)
662#define GAMTARBMODE 0x04a08
663#define ARB_MODE_BWGTLB_DISABLE (1<<9)
664#define ARB_MODE_SWIZZLE_BDW (1<<1)
605#define RENDER_HWS_PGA_GEN7 (0x04080) 665#define RENDER_HWS_PGA_GEN7 (0x04080)
606#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) 666#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
607#define RING_FAULT_GTTSEL_MASK (1<<11) 667#define RING_FAULT_GTTSEL_MASK (1<<11)
@@ -609,6 +669,7 @@
609#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3) 669#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
610#define RING_FAULT_VALID (1<<0) 670#define RING_FAULT_VALID (1<<0)
611#define DONE_REG 0x40b0 671#define DONE_REG 0x40b0
672#define GEN8_PRIVATE_PAT 0x40e0
612#define BSD_HWS_PGA_GEN7 (0x04180) 673#define BSD_HWS_PGA_GEN7 (0x04180)
613#define BLT_HWS_PGA_GEN7 (0x04280) 674#define BLT_HWS_PGA_GEN7 (0x04280)
614#define VEBOX_HWS_PGA_GEN7 (0x04380) 675#define VEBOX_HWS_PGA_GEN7 (0x04380)
@@ -669,13 +730,18 @@
669#define NOPID 0x02094 730#define NOPID 0x02094
670#define HWSTAM 0x02098 731#define HWSTAM 0x02098
671#define DMA_FADD_I8XX 0x020d0 732#define DMA_FADD_I8XX 0x020d0
733#define RING_BBSTATE(base) ((base)+0x110)
672 734
673#define ERROR_GEN6 0x040a0 735#define ERROR_GEN6 0x040a0
674#define GEN7_ERR_INT 0x44040 736#define GEN7_ERR_INT 0x44040
675#define ERR_INT_POISON (1<<31) 737#define ERR_INT_POISON (1<<31)
676#define ERR_INT_MMIO_UNCLAIMED (1<<13) 738#define ERR_INT_MMIO_UNCLAIMED (1<<13)
739#define ERR_INT_PIPE_CRC_DONE_C (1<<8)
677#define ERR_INT_FIFO_UNDERRUN_C (1<<6) 740#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
741#define ERR_INT_PIPE_CRC_DONE_B (1<<5)
678#define ERR_INT_FIFO_UNDERRUN_B (1<<3) 742#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
743#define ERR_INT_PIPE_CRC_DONE_A (1<<2)
744#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + pipe*3))
679#define ERR_INT_FIFO_UNDERRUN_A (1<<0) 745#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
680#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) 746#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
681 747
@@ -683,6 +749,7 @@
683#define FPGA_DBG_RM_NOCLAIM (1<<31) 749#define FPGA_DBG_RM_NOCLAIM (1<<31)
684 750
685#define DERRMR 0x44050 751#define DERRMR 0x44050
752/* Note that HBLANK events are reserved on bdw+ */
686#define DERRMR_PIPEA_SCANLINE (1<<0) 753#define DERRMR_PIPEA_SCANLINE (1<<0)
687#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1) 754#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
688#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2) 755#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
@@ -716,6 +783,7 @@
716#define _3D_CHICKEN3 0x02090 783#define _3D_CHICKEN3 0x02090
717#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) 784#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
718#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) 785#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
786#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1)
719 787
720#define MI_MODE 0x0209c 788#define MI_MODE 0x0209c
721# define VS_TIMER_DISPATCH (1 << 6) 789# define VS_TIMER_DISPATCH (1 << 6)
@@ -890,6 +958,7 @@
890#define GT_BLT_USER_INTERRUPT (1 << 22) 958#define GT_BLT_USER_INTERRUPT (1 << 22)
891#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15) 959#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
892#define GT_BSD_USER_INTERRUPT (1 << 12) 960#define GT_BSD_USER_INTERRUPT (1 << 12)
961#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
893#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */ 962#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
894#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4) 963#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
895#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3) 964#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
@@ -900,6 +969,10 @@
900#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */ 969#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
901#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */ 970#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
902 971
972#define GT_PARITY_ERROR(dev) \
973 (GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
974 (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
975
903/* These are all the "old" interrupts */ 976/* These are all the "old" interrupts */
904#define ILK_BSD_USER_INTERRUPT (1<<5) 977#define ILK_BSD_USER_INTERRUPT (1<<5)
905#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 978#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
@@ -1048,9 +1121,6 @@
1048 _HSW_PIPE_SLICE_CHICKEN_1_A, + \ 1121 _HSW_PIPE_SLICE_CHICKEN_1_A, + \
1049 _HSW_PIPE_SLICE_CHICKEN_1_B) 1122 _HSW_PIPE_SLICE_CHICKEN_1_B)
1050 1123
1051#define HSW_CLKGATE_DISABLE_PART_1 0x46500
1052#define HSW_DPFC_GATING_DISABLE (1<<23)
1053
1054/* 1124/*
1055 * GPIO regs 1125 * GPIO regs
1056 */ 1126 */
@@ -1387,6 +1457,12 @@
1387 1457
1388#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504) 1458#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504)
1389 1459
1460#define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508)
1461#define CDCLK_FREQ_SHIFT 4
1462#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
1463#define CZCLK_FREQ_MASK 0xf
1464#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
1465
1390/* 1466/*
1391 * Palette regs 1467 * Palette regs
1392 */ 1468 */
@@ -1404,13 +1480,15 @@
1404 * device 0 function 0's pci config register 0x44 or 0x48 and matches it in 1480 * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
1405 * every way. It is not accessible from the CP register read instructions. 1481 * every way. It is not accessible from the CP register read instructions.
1406 * 1482 *
1483 * Starting from Haswell, you can't write registers using the MCHBAR mirror,
1484 * just read.
1407 */ 1485 */
1408#define MCHBAR_MIRROR_BASE 0x10000 1486#define MCHBAR_MIRROR_BASE 0x10000
1409 1487
1410#define MCHBAR_MIRROR_BASE_SNB 0x140000 1488#define MCHBAR_MIRROR_BASE_SNB 0x140000
1411 1489
1412/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */ 1490/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
1413#define DCLK 0x5e04 1491#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
1414 1492
1415/** 915-945 and GM965 MCH register controlling DRAM channel access */ 1493/** 915-945 and GM965 MCH register controlling DRAM channel access */
1416#define DCC 0x10200 1494#define DCC 0x10200
@@ -1705,9 +1783,9 @@
1705#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 1783#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
1706#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16)) 1784#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
1707 1785
1708#define GEN6_GT_PERF_STATUS 0x145948 1786#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
1709#define GEN6_RP_STATE_LIMITS 0x145994 1787#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
1710#define GEN6_RP_STATE_CAP 0x145998 1788#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
1711 1789
1712/* 1790/*
1713 * Logical Context regs 1791 * Logical Context regs
@@ -1752,6 +1830,12 @@
1752 * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages. 1830 * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
1753 */ 1831 */
1754#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) 1832#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
1833/* Same as Haswell, but 72064 bytes now. */
1834#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
1835
1836
1837#define VLV_CLK_CTL2 0x101104
1838#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
1755 1839
1756/* 1840/*
1757 * Overlay regs 1841 * Overlay regs
@@ -1771,6 +1855,83 @@
1771 * Display engine regs 1855 * Display engine regs
1772 */ 1856 */
1773 1857
1858/* Pipe A CRC regs */
1859#define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050)
1860#define PIPE_CRC_ENABLE (1 << 31)
1861/* ivb+ source selection */
1862#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
1863#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29)
1864#define PIPE_CRC_SOURCE_PF_IVB (2 << 29)
1865/* ilk+ source selection */
1866#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28)
1867#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28)
1868#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28)
1869/* embedded DP port on the north display block, reserved on ivb */
1870#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28)
1871#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */
1872/* vlv source selection */
1873#define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27)
1874#define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27)
1875#define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27)
1876/* with DP port the pipe source is invalid */
1877#define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27)
1878#define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27)
1879#define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27)
1880/* gen3+ source selection */
1881#define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28)
1882#define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28)
1883#define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28)
1884/* with DP/TV port the pipe source is invalid */
1885#define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28)
1886#define PIPE_CRC_SOURCE_TV_PRE (4 << 28)
1887#define PIPE_CRC_SOURCE_TV_POST (5 << 28)
1888#define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28)
1889#define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28)
1890/* gen2 doesn't have source selection bits */
1891#define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30)
1892
1893#define _PIPE_CRC_RES_1_A_IVB 0x60064
1894#define _PIPE_CRC_RES_2_A_IVB 0x60068
1895#define _PIPE_CRC_RES_3_A_IVB 0x6006c
1896#define _PIPE_CRC_RES_4_A_IVB 0x60070
1897#define _PIPE_CRC_RES_5_A_IVB 0x60074
1898
1899#define _PIPE_CRC_RES_RED_A (dev_priv->info->display_mmio_offset + 0x60060)
1900#define _PIPE_CRC_RES_GREEN_A (dev_priv->info->display_mmio_offset + 0x60064)
1901#define _PIPE_CRC_RES_BLUE_A (dev_priv->info->display_mmio_offset + 0x60068)
1902#define _PIPE_CRC_RES_RES1_A_I915 (dev_priv->info->display_mmio_offset + 0x6006c)
1903#define _PIPE_CRC_RES_RES2_A_G4X (dev_priv->info->display_mmio_offset + 0x60080)
1904
1905/* Pipe B CRC regs */
1906#define _PIPE_CRC_RES_1_B_IVB 0x61064
1907#define _PIPE_CRC_RES_2_B_IVB 0x61068
1908#define _PIPE_CRC_RES_3_B_IVB 0x6106c
1909#define _PIPE_CRC_RES_4_B_IVB 0x61070
1910#define _PIPE_CRC_RES_5_B_IVB 0x61074
1911
1912#define PIPE_CRC_CTL(pipe) _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)
1913#define PIPE_CRC_RES_1_IVB(pipe) \
1914 _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB)
1915#define PIPE_CRC_RES_2_IVB(pipe) \
1916 _PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB)
1917#define PIPE_CRC_RES_3_IVB(pipe) \
1918 _PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB)
1919#define PIPE_CRC_RES_4_IVB(pipe) \
1920 _PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB)
1921#define PIPE_CRC_RES_5_IVB(pipe) \
1922 _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB)
1923
1924#define PIPE_CRC_RES_RED(pipe) \
1925 _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000)
1926#define PIPE_CRC_RES_GREEN(pipe) \
1927 _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000)
1928#define PIPE_CRC_RES_BLUE(pipe) \
1929 _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000)
1930#define PIPE_CRC_RES_RES1_I915(pipe) \
1931 _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000)
1932#define PIPE_CRC_RES_RES2_G4X(pipe) \
1933 _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000)
1934
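/*
 * A minimal usage sketch for the CRC block above (ivb+ source
 * selection), assuming the usual i915 MMIO helpers and dev_priv; the
 * function name is hypothetical, not part of this patch:
 */
static u32 ivb_sample_primary_crc(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	I915_WRITE(PIPE_CRC_CTL(pipe),
		   PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB);
	POSTING_READ(PIPE_CRC_CTL(pipe));
	/* The RES registers latch a fresh CRC once per frame; real
	 * code waits for a vblank before sampling. */
	return I915_READ(PIPE_CRC_RES_1_IVB(pipe));
}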
1774/* Pipe A timing regs */ 1935/* Pipe A timing regs */
1775#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) 1936#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
1776#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004) 1937#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
@@ -1793,7 +1954,6 @@
1793#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020) 1954#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
1794#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028) 1955#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)
1795 1956
1796
1797#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) 1957#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
1798#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) 1958#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
1799#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) 1959#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
@@ -1803,8 +1963,9 @@
1803#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) 1963#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
1804#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) 1964#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
1805 1965
1806/* HSW eDP PSR registers */ 1966/* HSW+ eDP PSR registers */
1807#define EDP_PSR_CTL 0x64800 1967#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
1968#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
1808#define EDP_PSR_ENABLE (1<<31) 1969#define EDP_PSR_ENABLE (1<<31)
1809#define EDP_PSR_LINK_DISABLE (0<<27) 1970#define EDP_PSR_LINK_DISABLE (0<<27)
1810#define EDP_PSR_LINK_STANDBY (1<<27) 1971#define EDP_PSR_LINK_STANDBY (1<<27)
@@ -1827,16 +1988,16 @@
1827#define EDP_PSR_TP1_TIME_0us (3<<4) 1988#define EDP_PSR_TP1_TIME_0us (3<<4)
1828#define EDP_PSR_IDLE_FRAME_SHIFT 0 1989#define EDP_PSR_IDLE_FRAME_SHIFT 0
1829 1990
1830#define EDP_PSR_AUX_CTL 0x64810 1991#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
1831#define EDP_PSR_AUX_DATA1 0x64814 1992#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
1832#define EDP_PSR_DPCD_COMMAND 0x80060000 1993#define EDP_PSR_DPCD_COMMAND 0x80060000
1833#define EDP_PSR_AUX_DATA2 0x64818 1994#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
1834#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24) 1995#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
1835#define EDP_PSR_AUX_DATA3 0x6481c 1996#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
1836#define EDP_PSR_AUX_DATA4 0x64820 1997#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
1837#define EDP_PSR_AUX_DATA5 0x64824 1998#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
1838 1999
1839#define EDP_PSR_STATUS_CTL 0x64840 2000#define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40)
1840#define EDP_PSR_STATUS_STATE_MASK (7<<29) 2001#define EDP_PSR_STATUS_STATE_MASK (7<<29)
1841#define EDP_PSR_STATUS_STATE_IDLE (0<<29) 2002#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
1842#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29) 2003#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
@@ -1860,10 +2021,10 @@
1860#define EDP_PSR_STATUS_SENDING_TP1 (1<<4) 2021#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
1861#define EDP_PSR_STATUS_IDLE_MASK 0xf 2022#define EDP_PSR_STATUS_IDLE_MASK 0xf
1862 2023
1863#define EDP_PSR_PERF_CNT 0x64844 2024#define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44)
1864#define EDP_PSR_PERF_CNT_MASK 0xffffff 2025#define EDP_PSR_PERF_CNT_MASK 0xffffff
1865 2026
1866#define EDP_PSR_DEBUG_CTL 0x64860 2027#define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60)
1867#define EDP_PSR_DEBUG_MASK_LPSP (1<<27) 2028#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
1868#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) 2029#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
1869#define EDP_PSR_DEBUG_MASK_HPD (1<<25) 2030#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
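/*
 * Why these became functions of dev: Haswell keeps the PSR block at
 * 0x64800 while later parts move it to 0x6f800, so every access goes
 * through EDP_PSR_BASE(dev). A minimal sketch with a hypothetical
 * helper name, assuming the usual dev_priv/I915_READ plumbing:
 */
static void edp_psr_set_enable(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val = I915_READ(EDP_PSR_CTL(dev));

	if (enable)
		val |= EDP_PSR_ENABLE;
	else
		val &= ~EDP_PSR_ENABLE;
	I915_WRITE(EDP_PSR_CTL(dev), val);
}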
@@ -2006,6 +2167,14 @@
2006#define PCH_HDMIC 0xe1150 2167#define PCH_HDMIC 0xe1150
2007#define PCH_HDMID 0xe1160 2168#define PCH_HDMID 0xe1160
2008 2169
2170#define PORT_DFT_I9XX 0x61150
2171#define DC_BALANCE_RESET (1 << 25)
2172#define PORT_DFT2_G4X 0x61154
2173#define DC_BALANCE_RESET_VLV (1 << 31)
2174#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
2175#define PIPE_B_SCRAMBLE_RESET (1 << 1)
2176#define PIPE_A_SCRAMBLE_RESET (1 << 0)
2177
2009/* Gen 3 SDVO bits: */ 2178/* Gen 3 SDVO bits: */
2010#define SDVO_ENABLE (1 << 31) 2179#define SDVO_ENABLE (1 << 31)
2011#define SDVO_PIPE_SEL(pipe) ((pipe) << 30) 2180#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
@@ -2034,6 +2203,7 @@
2034 2203
2035/* Gen 4 SDVO/HDMI bits: */ 2204/* Gen 4 SDVO/HDMI bits: */
2036#define SDVO_COLOR_FORMAT_8bpc (0 << 26) 2205#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
2206#define SDVO_COLOR_FORMAT_MASK (7 << 26)
2037#define SDVO_ENCODING_SDVO (0 << 10) 2207#define SDVO_ENCODING_SDVO (0 << 10)
2038#define SDVO_ENCODING_HDMI (2 << 10) 2208#define SDVO_ENCODING_HDMI (2 << 10)
2039#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */ 2209#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
@@ -2238,6 +2408,21 @@
2238 2408
2239#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238) 2409#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
2240 2410
2411#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250)
2412#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350)
2413#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
2414 _VLV_BLC_PWM_CTL2_B)
2415
2416#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254)
2417#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354)
2418#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
2419 _VLV_BLC_PWM_CTL_B)
2420
2421#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260)
2422#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360)
2423#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
2424 _VLV_BLC_HIST_CTL_B)
2425
2241/* Backlight control */ 2426/* Backlight control */
2242#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */ 2427#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
2243#define BLM_PWM_ENABLE (1 << 31) 2428#define BLM_PWM_ENABLE (1 << 31)
@@ -2986,6 +3171,7 @@
2986#define PIPECONF_DISABLE 0 3171#define PIPECONF_DISABLE 0
2987#define PIPECONF_DOUBLE_WIDE (1<<30) 3172#define PIPECONF_DOUBLE_WIDE (1<<30)
2988#define I965_PIPECONF_ACTIVE (1<<30) 3173#define I965_PIPECONF_ACTIVE (1<<30)
3174#define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */
2989#define PIPECONF_FRAME_START_DELAY_MASK (3<<27) 3175#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
2990#define PIPECONF_SINGLE_WIDE 0 3176#define PIPECONF_SINGLE_WIDE 0
2991#define PIPECONF_PIPE_UNLOCKED 0 3177#define PIPECONF_PIPE_UNLOCKED 0
@@ -3068,6 +3254,18 @@
3068#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) 3254#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
3069#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) 3255#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
3070 3256
3257#define _PIPE_MISC_A 0x70030
3258#define _PIPE_MISC_B 0x71030
3259#define PIPEMISC_DITHER_BPC_MASK (7<<5)
3260#define PIPEMISC_DITHER_8_BPC (0<<5)
3261#define PIPEMISC_DITHER_10_BPC (1<<5)
3262#define PIPEMISC_DITHER_6_BPC (2<<5)
3263#define PIPEMISC_DITHER_12_BPC (3<<5)
3264#define PIPEMISC_DITHER_ENABLE (1<<4)
3265#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
3266#define PIPEMISC_DITHER_TYPE_SP (0<<2)
3267#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
3268
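/*
 * A minimal sketch of the PIPEMISC dither fields in use -- programming
 * 6 bpc spatial dithering, as one would for an 18bpp panel. pipe and
 * the I915_WRITE helper are assumed in scope:
 */
I915_WRITE(PIPEMISC(pipe),
	   PIPEMISC_DITHER_6_BPC | PIPEMISC_DITHER_ENABLE |
	   PIPEMISC_DITHER_TYPE_SP);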
3071#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) 3269#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
3072#define PIPEB_LINE_COMPARE_INT_EN (1<<29) 3270#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
3073#define PIPEB_HLINE_INT_EN (1<<28) 3271#define PIPEB_HLINE_INT_EN (1<<28)
@@ -3184,11 +3382,11 @@
3184 3382
3185/* define the Watermark register on Ironlake */ 3383/* define the Watermark register on Ironlake */
3186#define WM0_PIPEA_ILK 0x45100 3384#define WM0_PIPEA_ILK 0x45100
3187#define WM0_PIPE_PLANE_MASK (0x7f<<16) 3385#define WM0_PIPE_PLANE_MASK (0xffff<<16)
3188#define WM0_PIPE_PLANE_SHIFT 16 3386#define WM0_PIPE_PLANE_SHIFT 16
3189#define WM0_PIPE_SPRITE_MASK (0x3f<<8) 3387#define WM0_PIPE_SPRITE_MASK (0xff<<8)
3190#define WM0_PIPE_SPRITE_SHIFT 8 3388#define WM0_PIPE_SPRITE_SHIFT 8
3191#define WM0_PIPE_CURSOR_MASK (0x1f) 3389#define WM0_PIPE_CURSOR_MASK (0xff)
3192 3390
3193#define WM0_PIPEB_ILK 0x45104 3391#define WM0_PIPEB_ILK 0x45104
3194#define WM0_PIPEC_IVB 0x45200 3392#define WM0_PIPEC_IVB 0x45200
@@ -3198,9 +3396,10 @@
3198#define WM1_LP_LATENCY_MASK (0x7f<<24) 3396#define WM1_LP_LATENCY_MASK (0x7f<<24)
3199#define WM1_LP_FBC_MASK (0xf<<20) 3397#define WM1_LP_FBC_MASK (0xf<<20)
3200#define WM1_LP_FBC_SHIFT 20 3398#define WM1_LP_FBC_SHIFT 20
3201#define WM1_LP_SR_MASK (0x1ff<<8) 3399#define WM1_LP_FBC_SHIFT_BDW 19
3400#define WM1_LP_SR_MASK (0x7ff<<8)
3202#define WM1_LP_SR_SHIFT 8 3401#define WM1_LP_SR_SHIFT 8
3203#define WM1_LP_CURSOR_MASK (0x3f) 3402#define WM1_LP_CURSOR_MASK (0xff)
3204#define WM2_LP_ILK 0x4510c 3403#define WM2_LP_ILK 0x4510c
3205#define WM2_LP_EN (1<<31) 3404#define WM2_LP_EN (1<<31)
3206#define WM3_LP_ILK 0x45110 3405#define WM3_LP_ILK 0x45110
@@ -3281,17 +3480,17 @@
3281 * } while (high1 != high2); 3480 * } while (high1 != high2);
3282 * frame = (high1 << 8) | low1; 3481 * frame = (high1 << 8) | low1;
3283 */ 3482 */
3284#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040) 3483#define _PIPEAFRAMEHIGH 0x70040
3285#define PIPE_FRAME_HIGH_MASK 0x0000ffff 3484#define PIPE_FRAME_HIGH_MASK 0x0000ffff
3286#define PIPE_FRAME_HIGH_SHIFT 0 3485#define PIPE_FRAME_HIGH_SHIFT 0
3287#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044) 3486#define _PIPEAFRAMEPIXEL 0x70044
3288#define PIPE_FRAME_LOW_MASK 0xff000000 3487#define PIPE_FRAME_LOW_MASK 0xff000000
3289#define PIPE_FRAME_LOW_SHIFT 24 3488#define PIPE_FRAME_LOW_SHIFT 24
3290#define PIPE_PIXEL_MASK 0x00ffffff 3489#define PIPE_PIXEL_MASK 0x00ffffff
3291#define PIPE_PIXEL_SHIFT 0 3490#define PIPE_PIXEL_SHIFT 0
3292/* GM45+ just has to be different */ 3491/* GM45+ just has to be different */
3293#define _PIPEA_FRMCOUNT_GM45 0x70040 3492#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70040)
3294#define _PIPEA_FLIPCOUNT_GM45 0x70044 3493#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70044)
3295#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) 3494#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
3296 3495
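/*
 * The double-read loop described in the comment above, spelled out as
 * a minimal sketch for pre-GM45 pipe A (the hypothetical helper skips
 * the per-pipe indirection and any display_mmio_offset handling):
 */
static u32 pipe_a_frame_count(struct drm_i915_private *dev_priv)
{
	u32 high1, high2, low;

	do {
		high1 = I915_READ(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(_PIPEAFRAMEPIXEL);
		high2 = I915_READ(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	low = (low & PIPE_FRAME_LOW_MASK) >> PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}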
3297/* Cursor A & B regs */ 3496/* Cursor A & B regs */
@@ -3422,10 +3621,10 @@
3422#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000) 3621#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000)
3423#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008) 3622#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008)
3424#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024) 3623#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024)
3425#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040) 3624#define _PIPEBFRAMEHIGH 0x71040
3426#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044) 3625#define _PIPEBFRAMEPIXEL 0x71044
3427#define _PIPEB_FRMCOUNT_GM45 0x71040 3626#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71040)
3428#define _PIPEB_FLIPCOUNT_GM45 0x71044 3627#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71044)
3429 3628
3430 3629
3431/* Display B control */ 3630/* Display B control */
@@ -3780,6 +3979,7 @@
3780#define DE_SPRITEA_FLIP_DONE (1 << 28) 3979#define DE_SPRITEA_FLIP_DONE (1 << 28)
3781#define DE_PLANEB_FLIP_DONE (1 << 27) 3980#define DE_PLANEB_FLIP_DONE (1 << 27)
3782#define DE_PLANEA_FLIP_DONE (1 << 26) 3981#define DE_PLANEA_FLIP_DONE (1 << 26)
3982#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
3783#define DE_PCU_EVENT (1 << 25) 3983#define DE_PCU_EVENT (1 << 25)
3784#define DE_GTT_FAULT (1 << 24) 3984#define DE_GTT_FAULT (1 << 24)
3785#define DE_POISON (1 << 23) 3985#define DE_POISON (1 << 23)
@@ -3793,13 +3993,18 @@
3793#define DE_PIPEB_ODD_FIELD (1 << 13) 3993#define DE_PIPEB_ODD_FIELD (1 << 13)
3794#define DE_PIPEB_LINE_COMPARE (1 << 12) 3994#define DE_PIPEB_LINE_COMPARE (1 << 12)
3795#define DE_PIPEB_VSYNC (1 << 11) 3995#define DE_PIPEB_VSYNC (1 << 11)
3996#define DE_PIPEB_CRC_DONE (1 << 10)
3796#define DE_PIPEB_FIFO_UNDERRUN (1 << 8) 3997#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
3797#define DE_PIPEA_VBLANK (1 << 7) 3998#define DE_PIPEA_VBLANK (1 << 7)
3999#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe)))
3798#define DE_PIPEA_EVEN_FIELD (1 << 6) 4000#define DE_PIPEA_EVEN_FIELD (1 << 6)
3799#define DE_PIPEA_ODD_FIELD (1 << 5) 4001#define DE_PIPEA_ODD_FIELD (1 << 5)
3800#define DE_PIPEA_LINE_COMPARE (1 << 4) 4002#define DE_PIPEA_LINE_COMPARE (1 << 4)
3801#define DE_PIPEA_VSYNC (1 << 3) 4003#define DE_PIPEA_VSYNC (1 << 3)
4004#define DE_PIPEA_CRC_DONE (1 << 2)
4005#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe)))
3802#define DE_PIPEA_FIFO_UNDERRUN (1 << 0) 4006#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
4007#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe)))
3803 4008
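/*
 * With the parameterized forms above, each ILK-style pipe owns an
 * 8-bit group in the DE interrupt registers, so an enable mask for
 * both pipes is just (sketch, names as defined above):
 */
u32 de_enables = DE_PIPE_VBLANK(PIPE_A) | DE_PIPE_VBLANK(PIPE_B) |
		 DE_PIPE_CRC_DONE(PIPE_A) | DE_PIPE_CRC_DONE(PIPE_B) |
		 DE_PIPE_FIFO_UNDERRUN(PIPE_A) | DE_PIPE_FIFO_UNDERRUN(PIPE_B);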
3804/* More Ivybridge lolz */ 4009/* More Ivybridge lolz */
3805#define DE_ERR_INT_IVB (1<<30) 4010#define DE_ERR_INT_IVB (1<<30)
@@ -3815,9 +4020,8 @@
3815#define DE_PIPEB_VBLANK_IVB (1<<5) 4020#define DE_PIPEB_VBLANK_IVB (1<<5)
3816#define DE_SPRITEA_FLIP_DONE_IVB (1<<4) 4021#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
3817#define DE_PLANEA_FLIP_DONE_IVB (1<<3) 4022#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
4023#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane)))
3818#define DE_PIPEA_VBLANK_IVB (1<<0) 4024#define DE_PIPEA_VBLANK_IVB (1<<0)
3819
3820#define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7))
3821#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5)) 4025#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5))
3822 4026
3823#define VLV_MASTER_IER 0x4400c /* Gunit master IER */ 4027#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
@@ -3833,6 +4037,71 @@
3833#define GTIIR 0x44018 4037#define GTIIR 0x44018
3834#define GTIER 0x4401c 4038#define GTIER 0x4401c
3835 4039
4040#define GEN8_MASTER_IRQ 0x44200
4041#define GEN8_MASTER_IRQ_CONTROL (1<<31)
4042#define GEN8_PCU_IRQ (1<<30)
4043#define GEN8_DE_PCH_IRQ (1<<23)
4044#define GEN8_DE_MISC_IRQ (1<<22)
4045#define GEN8_DE_PORT_IRQ (1<<20)
4046#define GEN8_DE_PIPE_C_IRQ (1<<18)
4047#define GEN8_DE_PIPE_B_IRQ (1<<17)
4048#define GEN8_DE_PIPE_A_IRQ (1<<16)
4049#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe))
4050#define GEN8_GT_VECS_IRQ (1<<6)
4051#define GEN8_GT_VCS2_IRQ (1<<3)
4052#define GEN8_GT_VCS1_IRQ (1<<2)
4053#define GEN8_GT_BCS_IRQ (1<<1)
4054#define GEN8_GT_RCS_IRQ (1<<0)
4055
4056#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which)))
4057#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which)))
4058#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
4059#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
4060
4061#define GEN8_BCS_IRQ_SHIFT 16
4062#define GEN8_RCS_IRQ_SHIFT 0
4063#define GEN8_VCS2_IRQ_SHIFT 16
4064#define GEN8_VCS1_IRQ_SHIFT 0
4065#define GEN8_VECS_IRQ_SHIFT 0
4066
4067#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
4068#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
4069#define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe)))
4070#define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe)))
4071#define GEN8_PIPE_FIFO_UNDERRUN (1 << 31)
4072#define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29)
4073#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
4074#define GEN8_PIPE_CURSOR_FAULT (1 << 10)
4075#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
4076#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
4077#define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5)
4078#define GEN8_PIPE_FLIP_DONE (1 << 4)
4079#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
4080#define GEN8_PIPE_VSYNC (1 << 1)
4081#define GEN8_PIPE_VBLANK (1 << 0)
4082#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
4083 (GEN8_PIPE_CURSOR_FAULT | \
4084 GEN8_PIPE_SPRITE_FAULT | \
4085 GEN8_PIPE_PRIMARY_FAULT)
4086
4087#define GEN8_DE_PORT_ISR 0x44440
4088#define GEN8_DE_PORT_IMR 0x44444
4089#define GEN8_DE_PORT_IIR 0x44448
4090#define GEN8_DE_PORT_IER 0x4444c
4091#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
4092#define GEN8_AUX_CHANNEL_A (1 << 0)
4093
4094#define GEN8_DE_MISC_ISR 0x44460
4095#define GEN8_DE_MISC_IMR 0x44464
4096#define GEN8_DE_MISC_IIR 0x44468
4097#define GEN8_DE_MISC_IER 0x4446c
4098#define GEN8_DE_MISC_GSE (1 << 27)
4099
4100#define GEN8_PCU_ISR 0x444e0
4101#define GEN8_PCU_IMR 0x444e4
4102#define GEN8_PCU_IIR 0x444e8
4103#define GEN8_PCU_IER 0x444ec
4104
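/*
 * A minimal sketch of the two-level interrupt scheme these registers
 * describe: the master register reports which domain fired, then each
 * domain's IIR is read and write-one-cleared. Locking, the GT/port/
 * misc domains and real event handling are omitted, the function name
 * is hypothetical, and three pipes are assumed:
 */
static void gen8_de_irq_sketch(struct drm_i915_private *dev_priv)
{
	u32 master = I915_READ(GEN8_MASTER_IRQ);
	enum pipe pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0); /* mask while handling */

	for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
		u32 iir;

		if (!(master & GEN8_DE_PIPE_IRQ(pipe)))
			continue;
		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (iir & GEN8_PIPE_VBLANK) {
			/* drm_handle_vblank(dev, pipe) in real code */
		}
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); /* ack */
	}

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}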
3836#define ILK_DISPLAY_CHICKEN2 0x42004 4105#define ILK_DISPLAY_CHICKEN2 0x42004
3837/* Required on all Ironlake and Sandybridge according to the B-Spec. */ 4106/* Required on all Ironlake and Sandybridge according to the B-Spec. */
3838#define ILK_ELPIN_409_SELECT (1 << 25) 4107#define ILK_ELPIN_409_SELECT (1 << 25)
@@ -3858,8 +4127,14 @@
3858# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) 4127# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
3859 4128
3860#define CHICKEN_PAR1_1 0x42080 4129#define CHICKEN_PAR1_1 0x42080
4130#define DPA_MASK_VBLANK_SRD (1 << 15)
3861#define FORCE_ARB_IDLE_PLANES (1 << 14) 4131#define FORCE_ARB_IDLE_PLANES (1 << 14)
3862 4132
4133#define _CHICKEN_PIPESL_1_A 0x420b0
4134#define _CHICKEN_PIPESL_1_B 0x420b4
4135#define DPRS_MASK_VBLANK_SRD (1 << 0)
4136#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
4137
3863#define DISP_ARB_CTL 0x45000 4138#define DISP_ARB_CTL 0x45000
3864#define DISP_TILE_SURFACE_SWIZZLING (1<<13) 4139#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
3865#define DISP_FBC_WM_DIS (1<<15) 4140#define DISP_FBC_WM_DIS (1<<15)
@@ -3870,6 +4145,8 @@
3870/* GEN7 chicken */ 4145/* GEN7 chicken */
3871#define GEN7_COMMON_SLICE_CHICKEN1 0x7010 4146#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
3872# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) 4147# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
4148#define COMMON_SLICE_CHICKEN2 0x7014
4149# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
3873 4150
3874#define GEN7_L3CNTLREG1 0xB01C 4151#define GEN7_L3CNTLREG1 0xB01C
3875#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C 4152#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
@@ -4416,6 +4693,8 @@
4416#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) 4693#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
4417#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) 4694#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
4418#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) 4695#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
4696#define PANEL_PORT_SELECT_DPB_VLV (1 << 30)
4697#define PANEL_PORT_SELECT_DPC_VLV (2 << 30)
4419#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) 4698#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
4420#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) 4699#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
4421 4700
@@ -4447,7 +4726,6 @@
4447#define PANEL_PORT_SELECT_MASK (3 << 30) 4726#define PANEL_PORT_SELECT_MASK (3 << 30)
4448#define PANEL_PORT_SELECT_LVDS (0 << 30) 4727#define PANEL_PORT_SELECT_LVDS (0 << 30)
4449#define PANEL_PORT_SELECT_DPA (1 << 30) 4728#define PANEL_PORT_SELECT_DPA (1 << 30)
4450#define EDP_PANEL (1 << 30)
4451#define PANEL_PORT_SELECT_DPC (2 << 30) 4729#define PANEL_PORT_SELECT_DPC (2 << 30)
4452#define PANEL_PORT_SELECT_DPD (3 << 30) 4730#define PANEL_PORT_SELECT_DPD (3 << 30)
4453#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000) 4731#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
@@ -4456,11 +4734,6 @@
4456#define PANEL_LIGHT_ON_DELAY_SHIFT 0 4734#define PANEL_LIGHT_ON_DELAY_SHIFT 0
4457 4735
4458#define PCH_PP_OFF_DELAYS 0xc720c 4736#define PCH_PP_OFF_DELAYS 0xc720c
4459#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
4460#define PANEL_POWER_PORT_LVDS (0 << 30)
4461#define PANEL_POWER_PORT_DP_A (1 << 30)
4462#define PANEL_POWER_PORT_DP_C (2 << 30)
4463#define PANEL_POWER_PORT_DP_D (3 << 30)
4464#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) 4737#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
4465#define PANEL_POWER_DOWN_DELAY_SHIFT 16 4738#define PANEL_POWER_DOWN_DELAY_SHIFT 16
4466#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) 4739#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@@ -4638,7 +4911,7 @@
4638#define GEN6_RP_UP_IDLE_MIN (0x1<<3) 4911#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
4639#define GEN6_RP_UP_BUSY_AVG (0x2<<3) 4912#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
4640#define GEN6_RP_UP_BUSY_CONT (0x4<<3) 4913#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
4641#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0) 4914#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
4642#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) 4915#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
4643#define GEN6_RP_UP_THRESHOLD 0xA02C 4916#define GEN6_RP_UP_THRESHOLD 0xA02C
4644#define GEN6_RP_DOWN_THRESHOLD 0xA030 4917#define GEN6_RP_DOWN_THRESHOLD 0xA030
@@ -4683,6 +4956,10 @@
4683 GEN6_PM_RP_DOWN_TIMEOUT) 4956 GEN6_PM_RP_DOWN_TIMEOUT)
4684 4957
4685#define GEN6_GT_GFX_RC6_LOCKED 0x138104 4958#define GEN6_GT_GFX_RC6_LOCKED 0x138104
4959#define VLV_COUNTER_CONTROL 0x138104
4960#define VLV_COUNT_RANGE_HIGH (1<<15)
4961#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
4962#define VLV_RENDER_RC6_COUNT_EN (1<<0)
4686#define GEN6_GT_GFX_RC6 0x138108 4963#define GEN6_GT_GFX_RC6 0x138108
4687#define GEN6_GT_GFX_RC6p 0x13810C 4964#define GEN6_GT_GFX_RC6p 0x13810C
4688#define GEN6_GT_GFX_RC6pp 0x138110 4965#define GEN6_GT_GFX_RC6pp 0x138110
@@ -4694,8 +4971,11 @@
4694#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 4971#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
4695#define GEN6_PCODE_WRITE_RC6VIDS 0x4 4972#define GEN6_PCODE_WRITE_RC6VIDS 0x4
4696#define GEN6_PCODE_READ_RC6VIDS 0x5 4973#define GEN6_PCODE_READ_RC6VIDS 0x5
4974#define GEN6_PCODE_READ_D_COMP 0x10
4975#define GEN6_PCODE_WRITE_D_COMP 0x11
4697#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) 4976#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
4698#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) 4977#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
4978#define DISPLAY_IPS_CONTROL 0x19
4699#define GEN6_PCODE_DATA 0x138128 4979#define GEN6_PCODE_DATA 0x138128
4700#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 4980#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
4701#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 4981#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -4713,6 +4993,7 @@
4713 4993
4714/* IVYBRIDGE DPF */ 4994/* IVYBRIDGE DPF */
4715#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */ 4995#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
4996#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */
4716#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14) 4997#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
4717#define GEN7_PARITY_ERROR_VALID (1<<13) 4998#define GEN7_PARITY_ERROR_VALID (1<<13)
4718#define GEN7_L3CDERRST1_BANK_MASK (3<<11) 4999#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
@@ -4726,11 +5007,13 @@
4726#define GEN7_L3CDERRST1_ENABLE (1<<7) 5007#define GEN7_L3CDERRST1_ENABLE (1<<7)
4727 5008
4728#define GEN7_L3LOG_BASE 0xB070 5009#define GEN7_L3LOG_BASE 0xB070
5010#define HSW_L3LOG_BASE_SLICE1 0xB270
4729#define GEN7_L3LOG_SIZE 0x80 5011#define GEN7_L3LOG_SIZE 0x80
4730 5012
4731#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ 5013#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
4732#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100 5014#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
4733#define GEN7_MAX_PS_THREAD_DEP (8<<12) 5015#define GEN7_MAX_PS_THREAD_DEP (8<<12)
5016#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
4734#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) 5017#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
4735 5018
4736#define GEN7_ROW_CHICKEN2 0xe4f4 5019#define GEN7_ROW_CHICKEN2 0xe4f4
@@ -4740,6 +5023,10 @@
4740#define HSW_ROW_CHICKEN3 0xe49c 5023#define HSW_ROW_CHICKEN3 0xe49c
4741#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) 5024#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
4742 5025
5026#define HALF_SLICE_CHICKEN3 0xe184
5027#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
5028#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
5029
4743#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) 5030#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
4744#define INTEL_AUDIO_DEVCL 0x808629FB 5031#define INTEL_AUDIO_DEVCL 0x808629FB
4745#define INTEL_AUDIO_DEVBLC 0x80862801 5032#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -4781,6 +5068,18 @@
4781 CPT_AUD_CNTL_ST_B) 5068 CPT_AUD_CNTL_ST_B)
4782#define CPT_AUD_CNTRL_ST2 0xE50C0 5069#define CPT_AUD_CNTRL_ST2 0xE50C0
4783 5070
5071#define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
5072#define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
5073#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
5074 VLV_HDMIW_HDMIEDID_A, \
5075 VLV_HDMIW_HDMIEDID_B)
5076#define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
5077#define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
5078#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
5079 VLV_AUD_CNTL_ST_A, \
5080 VLV_AUD_CNTL_ST_B)
5081#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0)
5082
4784/* These are the 4 32-bit write offset registers for each stream 5083/* These are the 4 32-bit write offset registers for each stream
4785 * output buffer. It determines the offset from the 5084 * output buffer. It determines the offset from the
4786 * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to. 5085 * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
@@ -4797,6 +5096,12 @@
4797#define CPT_AUD_CFG(pipe) _PIPE(pipe, \ 5096#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
4798 CPT_AUD_CONFIG_A, \ 5097 CPT_AUD_CONFIG_A, \
4799 CPT_AUD_CONFIG_B) 5098 CPT_AUD_CONFIG_B)
5099#define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
5100#define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
5101#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
5102 VLV_AUD_CONFIG_A, \
5103 VLV_AUD_CONFIG_B)
5104
4800#define AUD_CONFIG_N_VALUE_INDEX (1 << 29) 5105#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
4801#define AUD_CONFIG_N_PROG_ENABLE (1 << 28) 5106#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
4802#define AUD_CONFIG_UPPER_N_SHIFT 20 5107#define AUD_CONFIG_UPPER_N_SHIFT 20
@@ -4804,7 +5109,17 @@
4804#define AUD_CONFIG_LOWER_N_SHIFT 4 5109#define AUD_CONFIG_LOWER_N_SHIFT 4
4805#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4) 5110#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
4806#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16 5111#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
4807#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) 5112#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
5113#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
5114#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16)
5115#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16)
5116#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16)
5117#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16)
5118#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16)
5119#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16)
5120#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
5121#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
5122#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
4808#define AUD_CONFIG_DISABLE_NCTS (1 << 3) 5123#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
4809 5124
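/*
 * Mapping a mode's pixel clock (in kHz) onto the AUD_CONFIG encoding
 * above -- a minimal sketch. Only the CEA rates have encodings; DP
 * sets AUD_CONFIG_N_VALUE_INDEX instead, and the 25.175 fallback for
 * unknown HDMI rates is an assumption here, not documented behaviour.
 */
static u32 audio_config_hdmi_pixel_clock(int clock_khz)
{
	switch (clock_khz) {
	case 25175:  return AUD_CONFIG_PIXEL_CLOCK_HDMI_25175;
	case 25200:  return AUD_CONFIG_PIXEL_CLOCK_HDMI_25200;
	case 27000:  return AUD_CONFIG_PIXEL_CLOCK_HDMI_27000;
	case 27027:  return AUD_CONFIG_PIXEL_CLOCK_HDMI_27027;
	case 54000:  return AUD_CONFIG_PIXEL_CLOCK_HDMI_54000;
	case 54054:  return AUD_CONFIG_PIXEL_CLOCK_HDMI_54054;
	case 74176:  return AUD_CONFIG_PIXEL_CLOCK_HDMI_74176;
	case 74250:  return AUD_CONFIG_PIXEL_CLOCK_HDMI_74250;
	case 148352: return AUD_CONFIG_PIXEL_CLOCK_HDMI_148352;
	case 148500: return AUD_CONFIG_PIXEL_CLOCK_HDMI_148500;
	default:     return AUD_CONFIG_PIXEL_CLOCK_HDMI_25175;
	}
}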
4810/* HSW Audio */ 5125/* HSW Audio */
@@ -4929,6 +5244,7 @@
4929#define DDI_BUF_CTL_B 0x64100 5244#define DDI_BUF_CTL_B 0x64100
4930#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) 5245#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
4931#define DDI_BUF_CTL_ENABLE (1<<31) 5246#define DDI_BUF_CTL_ENABLE (1<<31)
5247/* Haswell */
4932#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ 5248#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
4933#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ 5249#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
4934#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ 5250#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
@@ -4938,6 +5254,16 @@
4938#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ 5254#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
4939#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ 5255#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
4940#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ 5256#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
5257/* Broadwell */
5258#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */
5259#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */
5260#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */
5261#define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */
5262#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */
5263#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */
5264#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */
5265#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */
5266#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */
4941#define DDI_BUF_EMP_MASK (0xf<<24) 5267#define DDI_BUF_EMP_MASK (0xf<<24)
4942#define DDI_BUF_PORT_REVERSAL (1<<16) 5268#define DDI_BUF_PORT_REVERSAL (1<<16)
4943#define DDI_BUF_IS_IDLE (1<<7) 5269#define DDI_BUF_IS_IDLE (1<<7)
@@ -5047,6 +5373,9 @@
5047#define LCPLL_PLL_LOCK (1<<30) 5373#define LCPLL_PLL_LOCK (1<<30)
5048#define LCPLL_CLK_FREQ_MASK (3<<26) 5374#define LCPLL_CLK_FREQ_MASK (3<<26)
5049#define LCPLL_CLK_FREQ_450 (0<<26) 5375#define LCPLL_CLK_FREQ_450 (0<<26)
 5376#define LCPLL_CLK_FREQ_540_BDW (1<<26)
5377#define LCPLL_CLK_FREQ_337_5_BDW (2<<26)
5378#define LCPLL_CLK_FREQ_675_BDW (3<<26)
5050#define LCPLL_CD_CLOCK_DISABLE (1<<25) 5379#define LCPLL_CD_CLOCK_DISABLE (1<<25)
5051#define LCPLL_CD2X_CLOCK_DISABLE (1<<23) 5380#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
5052#define LCPLL_POWER_DOWN_ALLOW (1<<22) 5381#define LCPLL_POWER_DOWN_ALLOW (1<<22)
@@ -5128,4 +5457,414 @@
5128#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) 5457#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
5129#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) 5458#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
5130 5459
5460/* VLV MIPI registers */
5461
5462#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
5463#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
5464#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
5465#define DPI_ENABLE (1 << 31) /* A + B */
5466#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
5467#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
5468#define DUAL_LINK_MODE_MASK (1 << 26)
5469#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
5470#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
5471#define DITHERING_ENABLE (1 << 25) /* A + B */
5472#define FLOPPED_HSTX (1 << 23)
5473#define DE_INVERT (1 << 19) /* XXX */
5474#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
5475#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
5476#define AFE_LATCHOUT (1 << 17)
5477#define LP_OUTPUT_HOLD (1 << 16)
5478#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
5479#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
5480#define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11
5481#define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
5482#define CSB_SHIFT 9
5483#define CSB_MASK (3 << 9)
5484#define CSB_20MHZ (0 << 9)
5485#define CSB_10MHZ (1 << 9)
5486#define CSB_40MHZ (2 << 9)
5487#define BANDGAP_MASK (1 << 8)
5488#define BANDGAP_PNW_CIRCUIT (0 << 8)
5489#define BANDGAP_LNC_CIRCUIT (1 << 8)
5490#define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
5491#define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
5492#define TEARING_EFFECT_DELAY (1 << 4) /* A + B */
5493#define TEARING_EFFECT_SHIFT 2 /* A + B */
5494#define TEARING_EFFECT_MASK (3 << 2)
5495#define TEARING_EFFECT_OFF (0 << 2)
5496#define TEARING_EFFECT_DSI (1 << 2)
5497#define TEARING_EFFECT_GPIO (2 << 2)
5498#define LANE_CONFIGURATION_SHIFT 0
5499#define LANE_CONFIGURATION_MASK (3 << 0)
5500#define LANE_CONFIGURATION_4LANE (0 << 0)
5501#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0)
5502#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
5503
5504#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
5505#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
5506#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
5507#define TEARING_EFFECT_DELAY_SHIFT 0
5508#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
5509
5510/* XXX: all bits reserved */
5511#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
5512
5513/* MIPI DSI Controller and D-PHY registers */
5514
5515#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000)
5516#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800)
5517#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
5518#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
5519#define ULPS_STATE_MASK (3 << 1)
5520#define ULPS_STATE_ENTER (2 << 1)
5521#define ULPS_STATE_EXIT (1 << 1)
5522#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
5523#define DEVICE_READY (1 << 0)
5524
5525#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004)
5526#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804)
5527#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
5528#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008)
5529#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808)
5530#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
5531#define TEARING_EFFECT (1 << 31)
5532#define SPL_PKT_SENT_INTERRUPT (1 << 30)
5533#define GEN_READ_DATA_AVAIL (1 << 29)
5534#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
5535#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
5536#define RX_PROT_VIOLATION (1 << 26)
5537#define RX_INVALID_TX_LENGTH (1 << 25)
5538#define ACK_WITH_NO_ERROR (1 << 24)
5539#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
5540#define LP_RX_TIMEOUT (1 << 22)
5541#define HS_TX_TIMEOUT (1 << 21)
5542#define DPI_FIFO_UNDERRUN (1 << 20)
5543#define LOW_CONTENTION (1 << 19)
5544#define HIGH_CONTENTION (1 << 18)
5545#define TXDSI_VC_ID_INVALID (1 << 17)
5546#define TXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 16)
5547#define TXCHECKSUM_ERROR (1 << 15)
5548#define TXECC_MULTIBIT_ERROR (1 << 14)
5549#define TXECC_SINGLE_BIT_ERROR (1 << 13)
5550#define TXFALSE_CONTROL_ERROR (1 << 12)
5551#define RXDSI_VC_ID_INVALID (1 << 11)
 5552#define RXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 10)
5553#define RXCHECKSUM_ERROR (1 << 9)
5554#define RXECC_MULTIBIT_ERROR (1 << 8)
5555#define RXECC_SINGLE_BIT_ERROR (1 << 7)
5556#define RXFALSE_CONTROL_ERROR (1 << 6)
5557#define RXHS_RECEIVE_TIMEOUT_ERROR (1 << 5)
5558#define RX_LP_TX_SYNC_ERROR (1 << 4)
 5559#define RXESCAPE_MODE_ENTRY_ERROR (1 << 3)
5560#define RXEOT_SYNC_ERROR (1 << 2)
5561#define RXSOT_SYNC_ERROR (1 << 1)
5562#define RXSOT_ERROR (1 << 0)
5563
5564#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c)
5565#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c)
5566#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
5567#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
5568#define CMD_MODE_NOT_SUPPORTED (0 << 13)
5569#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
5570#define CMD_MODE_DATA_WIDTH_9_BIT (2 << 13)
5571#define CMD_MODE_DATA_WIDTH_8_BIT (3 << 13)
5572#define CMD_MODE_DATA_WIDTH_OPTION1 (4 << 13)
5573#define CMD_MODE_DATA_WIDTH_OPTION2 (5 << 13)
5574#define VID_MODE_FORMAT_MASK (0xf << 7)
5575#define VID_MODE_NOT_SUPPORTED (0 << 7)
5576#define VID_MODE_FORMAT_RGB565 (1 << 7)
5577#define VID_MODE_FORMAT_RGB666 (2 << 7)
5578#define VID_MODE_FORMAT_RGB666_LOOSE (3 << 7)
5579#define VID_MODE_FORMAT_RGB888 (4 << 7)
5580#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5
5581#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5)
5582#define VID_MODE_CHANNEL_NUMBER_SHIFT 3
5583#define VID_MODE_CHANNEL_NUMBER_MASK (3 << 3)
5584#define DATA_LANES_PRG_REG_SHIFT 0
5585#define DATA_LANES_PRG_REG_MASK (7 << 0)
5586
5587#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010)
5588#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810)
5589#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
5590#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
5591
5592#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014)
5593#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814)
5594#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
5595#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
5596
5597#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018)
5598#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818)
5599#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
5600#define TURN_AROUND_TIMEOUT_MASK 0x3f
5601
5602#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c)
5603#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c)
5604#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
5605#define DEVICE_RESET_TIMER_MASK 0xffff
5606
5607#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020)
5608#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820)
5609#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
5610#define VERTICAL_ADDRESS_SHIFT 16
5611#define VERTICAL_ADDRESS_MASK (0xffff << 16)
5612#define HORIZONTAL_ADDRESS_SHIFT 0
5613#define HORIZONTAL_ADDRESS_MASK 0xffff
5614
5615#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024)
5616#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824)
5617#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
5618#define DBI_FIFO_EMPTY_HALF (0 << 0)
5619#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
5620#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
5621
5622/* regs below are bits 15:0 */
5623#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028)
5624#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828)
5625#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
5626
5627#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c)
5628#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c)
5629#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
5630
5631#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030)
5632#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830)
5633#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
5634
5635#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034)
5636#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834)
5637#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
5638
5639#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038)
5640#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838)
5641#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
5642
5643#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c)
5644#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c)
5645#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
5646
5647#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040)
5648#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840)
5649#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
5650
5651#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044)
5652#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844)
5653#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
5654/* regs above are bits 15:0 */
5655
5656#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048)
5657#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848)
5658#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
5659#define DPI_LP_MODE (1 << 6)
5660#define BACKLIGHT_OFF (1 << 5)
5661#define BACKLIGHT_ON (1 << 4)
5662#define COLOR_MODE_OFF (1 << 3)
5663#define COLOR_MODE_ON (1 << 2)
5664#define TURN_ON (1 << 1)
5665#define SHUTDOWN (1 << 0)
5666
5667#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c)
5668#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c)
5669#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
5670#define COMMAND_BYTE_SHIFT 0
5671#define COMMAND_BYTE_MASK (0x3f << 0)
5672
5673#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050)
5674#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850)
5675#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
5676#define MASTER_INIT_TIMER_SHIFT 0
5677#define MASTER_INIT_TIMER_MASK (0xffff << 0)
5678
5679#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054)
5680#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854)
5681#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
5682#define MAX_RETURN_PKT_SIZE_SHIFT 0
5683#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
5684
5685#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058)
5686#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858)
5687#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
5688#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
5689#define DISABLE_VIDEO_BTA (1 << 3)
5690#define IP_TG_CONFIG (1 << 2)
5691#define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0)
5692#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
5693#define VIDEO_MODE_BURST (3 << 0)
5694
5695#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c)
5696#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c)
5697#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
5698#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
5699#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
5700#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
5701#define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4)
5702#define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
5703#define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2)
5704#define CLOCKSTOP (1 << 1)
5705#define EOT_DISABLE (1 << 0)
5706
5707#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060)
5708#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860)
5709#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
5710#define LP_BYTECLK_SHIFT 0
5711#define LP_BYTECLK_MASK (0xffff << 0)
5712
5713/* bits 31:0 */
5714#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064)
5715#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864)
5716#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
5717
5718/* bits 31:0 */
5719#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068)
5720#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868)
5721#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
5722
5723#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c)
5724#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c)
5725#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
5726#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070)
5727#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870)
5728#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
5729#define LONG_PACKET_WORD_COUNT_SHIFT 8
5730#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
5731#define SHORT_PACKET_PARAM_SHIFT 8
5732#define SHORT_PACKET_PARAM_MASK (0xffff << 8)
5733#define VIRTUAL_CHANNEL_SHIFT 6
5734#define VIRTUAL_CHANNEL_MASK (3 << 6)
5735#define DATA_TYPE_SHIFT 0
 5736#define DATA_TYPE_MASK (0x3f << 0)
5737/* data type values, see include/video/mipi_display.h */
5738
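/*
 * Composing the 32-bit header for a generic long write on virtual
 * channel 0 from the fields above (minimal sketch; len, pipe and the
 * MMIO helpers are assumed in scope, and the data type constant comes
 * from include/video/mipi_display.h as the comment notes):
 */
u32 gen_hdr = (len << LONG_PACKET_WORD_COUNT_SHIFT) |
	      (0 << VIRTUAL_CHANNEL_SHIFT) |
	      (MIPI_DSI_GENERIC_LONG_WRITE << DATA_TYPE_SHIFT);
I915_WRITE(MIPI_HS_GEN_CTRL(pipe), gen_hdr);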
5739#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074)
5740#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874)
5741#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
5742#define DPI_FIFO_EMPTY (1 << 28)
5743#define DBI_FIFO_EMPTY (1 << 27)
5744#define LP_CTRL_FIFO_EMPTY (1 << 26)
5745#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
5746#define LP_CTRL_FIFO_FULL (1 << 24)
5747#define HS_CTRL_FIFO_EMPTY (1 << 18)
5748#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
5749#define HS_CTRL_FIFO_FULL (1 << 16)
5750#define LP_DATA_FIFO_EMPTY (1 << 10)
5751#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
5752#define LP_DATA_FIFO_FULL (1 << 8)
5753#define HS_DATA_FIFO_EMPTY (1 << 2)
5754#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
5755#define HS_DATA_FIFO_FULL (1 << 0)
5756
5757#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078)
5758#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878)
5759#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
5760#define DBI_HS_LP_MODE_MASK (1 << 0)
5761#define DBI_LP_MODE (1 << 0)
5762#define DBI_HS_MODE (0 << 0)
5763
5764#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080)
5765#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880)
5766#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
5767#define EXIT_ZERO_COUNT_SHIFT 24
5768#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
5769#define TRAIL_COUNT_SHIFT 16
5770#define TRAIL_COUNT_MASK (0x1f << 16)
5771#define CLK_ZERO_COUNT_SHIFT 8
5772#define CLK_ZERO_COUNT_MASK (0xff << 8)
5773#define PREPARE_COUNT_SHIFT 0
5774#define PREPARE_COUNT_MASK (0x3f << 0)
5775
5776/* bits 31:0 */
5777#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084)
5778#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884)
5779#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
5780
5781#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088)
5782#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888)
5783#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
5784#define LP_HS_SSW_CNT_SHIFT 16
5785#define LP_HS_SSW_CNT_MASK (0xffff << 16)
5786#define HS_LP_PWR_SW_CNT_SHIFT 0
5787#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
5788
5789#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c)
5790#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c)
5791#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
5792#define STOP_STATE_STALL_COUNTER_SHIFT 0
5793#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
5794
5795#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090)
5796#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890)
5797#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
5798#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094)
5799#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894)
5800#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
5801#define RX_CONTENTION_DETECTED (1 << 0)
5802
5803/* XXX: only pipe A ?!? */
5804#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100)
5805#define DBI_TYPEC_ENABLE (1 << 31)
5806#define DBI_TYPEC_WIP (1 << 30)
5807#define DBI_TYPEC_OPTION_SHIFT 28
5808#define DBI_TYPEC_OPTION_MASK (3 << 28)
5809#define DBI_TYPEC_FREQ_SHIFT 24
5810#define DBI_TYPEC_FREQ_MASK (0xf << 24)
5811#define DBI_TYPEC_OVERRIDE (1 << 8)
5812#define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0
5813#define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0)
5814
5815
5816/* MIPI adapter registers */
5817
5818#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104)
5819#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904)
5820#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
5821#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
5822#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
5823#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
5824#define ESCAPE_CLOCK_DIVIDER_2 (1 << 5)
5825#define ESCAPE_CLOCK_DIVIDER_4 (2 << 5)
5826#define READ_REQUEST_PRIORITY_SHIFT 3
5827#define READ_REQUEST_PRIORITY_MASK (3 << 3)
5828#define READ_REQUEST_PRIORITY_LOW (0 << 3)
5829#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
5830#define RGB_FLIP_TO_BGR (1 << 2)
5831
5832#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108)
5833#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908)
5834#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
5835#define DATA_MEM_ADDRESS_SHIFT 5
5836#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
5837#define DATA_VALID (1 << 0)
5838
5839#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c)
5840#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c)
5841#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
5842#define DATA_LENGTH_SHIFT 0
5843#define DATA_LENGTH_MASK (0xfffff << 0)
5844
5845#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110)
5846#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910)
5847#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
5848#define COMMAND_MEM_ADDRESS_SHIFT 5
5849#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
5850#define AUTO_PWG_ENABLE (1 << 2)
5851#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
5852#define COMMAND_VALID (1 << 0)
5853
5854#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114)
5855#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914)
5856#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
5857#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
5858#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
5859
5860#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
5861#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
5862#define MIPI_READ_DATA_RETURN(pipe, n) \
5863 (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
5864
5865#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138)
5866#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938)
5867#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
5868#define READ_DATA_VALID(n) (1 << (n))
5869
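/*
 * Draining a completed MIPI read with the two registers above -- each
 * of the eight return dwords has a matching valid bit. Minimal sketch
 * with a hypothetical helper name:
 */
static void mipi_drain_read_data(struct drm_i915_private *dev_priv,
				 enum pipe pipe, u32 buf[8])
{
	u32 valid = I915_READ(MIPI_READ_DATA_VALID(pipe));
	int n;

	for (n = 0; n < 8; n++)
		if (valid & READ_DATA_VALID(n))
			buf[n] = I915_READ(MIPI_READ_DATA_RETURN(pipe, n));
}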
5131#endif /* _I915_REG_H_ */ 5870#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 70db618989c4..98790c7cccb1 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -214,6 +214,22 @@ static void i915_save_display(struct drm_device *dev)
214 dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); 214 dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
215 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 215 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
216 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); 216 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
217 } else if (IS_VALLEYVIEW(dev)) {
218 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
219 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
220
221 dev_priv->regfile.saveBLC_PWM_CTL =
222 I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
223 dev_priv->regfile.saveBLC_HIST_CTL =
224 I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
225 dev_priv->regfile.saveBLC_PWM_CTL2 =
226 I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
227 dev_priv->regfile.saveBLC_PWM_CTL_B =
228 I915_READ(VLV_BLC_PWM_CTL(PIPE_B));
229 dev_priv->regfile.saveBLC_HIST_CTL_B =
230 I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
231 dev_priv->regfile.saveBLC_PWM_CTL2_B =
232 I915_READ(VLV_BLC_PWM_CTL2(PIPE_B));
217 } else { 233 } else {
218 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); 234 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
219 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); 235 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
@@ -302,6 +318,19 @@ static void i915_restore_display(struct drm_device *dev)
302 I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); 318 I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
303 I915_WRITE(RSTDBYCTL, 319 I915_WRITE(RSTDBYCTL,
304 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY); 320 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
321 } else if (IS_VALLEYVIEW(dev)) {
322 I915_WRITE(VLV_BLC_PWM_CTL(PIPE_A),
323 dev_priv->regfile.saveBLC_PWM_CTL);
324 I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
325 dev_priv->regfile.saveBLC_HIST_CTL);
326 I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_A),
327 dev_priv->regfile.saveBLC_PWM_CTL2);
 328 I915_WRITE(VLV_BLC_PWM_CTL(PIPE_B),
 329 dev_priv->regfile.saveBLC_PWM_CTL_B);
 330 I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
 331 dev_priv->regfile.saveBLC_HIST_CTL_B);
 332 I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_B),
 333 dev_priv->regfile.saveBLC_PWM_CTL2_B);
305 } else { 334 } else {
306 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS); 335 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
307 I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL); 336 I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
@@ -340,7 +369,9 @@ int i915_save_state(struct drm_device *dev)
340 struct drm_i915_private *dev_priv = dev->dev_private; 369 struct drm_i915_private *dev_priv = dev->dev_private;
341 int i; 370 int i;
342 371
343 pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB); 372 if (INTEL_INFO(dev)->gen <= 4)
373 pci_read_config_byte(dev->pdev, LBB,
374 &dev_priv->regfile.saveLBB);
344 375
345 mutex_lock(&dev->struct_mutex); 376 mutex_lock(&dev->struct_mutex);
346 377
@@ -367,7 +398,8 @@ int i915_save_state(struct drm_device *dev)
367 intel_disable_gt_powersave(dev); 398 intel_disable_gt_powersave(dev);
368 399
369 /* Cache mode state */ 400 /* Cache mode state */
370 dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 401 if (INTEL_INFO(dev)->gen < 7)
402 dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
371 403
372 /* Memory Arbitration state */ 404 /* Memory Arbitration state */
373 dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); 405 dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
@@ -390,7 +422,9 @@ int i915_restore_state(struct drm_device *dev)
390 struct drm_i915_private *dev_priv = dev->dev_private; 422 struct drm_i915_private *dev_priv = dev->dev_private;
391 int i; 423 int i;
392 424
393 pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB); 425 if (INTEL_INFO(dev)->gen <= 4)
426 pci_write_config_byte(dev->pdev, LBB,
427 dev_priv->regfile.saveLBB);
394 428
395 mutex_lock(&dev->struct_mutex); 429 mutex_lock(&dev->struct_mutex);
396 430
@@ -414,7 +448,9 @@ int i915_restore_state(struct drm_device *dev)
414 } 448 }
415 449
416 /* Cache mode state */ 450 /* Cache mode state */
417 I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000); 451 if (INTEL_INFO(dev)->gen < 7)
452 I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
453 0xffff0000);
418 454
419 /* Memory arbitration state */ 455 /* Memory arbitration state */
420 I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); 456 I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index c8c4112de110..cef38fd320a7 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,30 +32,50 @@
32#include "intel_drv.h" 32#include "intel_drv.h"
33#include "i915_drv.h" 33#include "i915_drv.h"
34 34
35#define dev_to_drm_minor(d) dev_get_drvdata((d))
36
35#ifdef CONFIG_PM 37#ifdef CONFIG_PM
36static u32 calc_residency(struct drm_device *dev, const u32 reg) 38static u32 calc_residency(struct drm_device *dev, const u32 reg)
37{ 39{
38 struct drm_i915_private *dev_priv = dev->dev_private; 40 struct drm_i915_private *dev_priv = dev->dev_private;
39 u64 raw_time; /* 32b value may overflow during fixed point math */ 41 u64 raw_time; /* 32b value may overflow during fixed point math */
42 u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
40 43
41 if (!intel_enable_rc6(dev)) 44 if (!intel_enable_rc6(dev))
42 return 0; 45 return 0;
43 46
44 raw_time = I915_READ(reg) * 128ULL; 47 /* On VLV, residency time is in CZ units rather than 1.28us */
45 return DIV_ROUND_UP_ULL(raw_time, 100000); 48 if (IS_VALLEYVIEW(dev)) {
49 u32 clkctl2;
50
51 clkctl2 = I915_READ(VLV_CLK_CTL2) >>
52 CLK_CTL2_CZCOUNT_30NS_SHIFT;
53 if (!clkctl2) {
54 WARN(!clkctl2, "bogus CZ count value");
55 return 0;
56 }
57 units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
58 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
59 units <<= 8;
60
61 div = 1000000ULL * bias;
62 }
63
64 raw_time = I915_READ(reg) * units;
65 return DIV_ROUND_UP_ULL(raw_time, div);
46} 66}
47 67
48static ssize_t 68static ssize_t
49show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) 69show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
50{ 70{
51 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 71 struct drm_minor *dminor = dev_to_drm_minor(kdev);
52 return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev)); 72 return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
53} 73}
54 74
55static ssize_t 75static ssize_t
56show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf) 76show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
57{ 77{
58 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 78 struct drm_minor *dminor = dev_get_drvdata(kdev);
59 u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); 79 u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
60 return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency); 80 return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
61} 81}
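
calc_residency() above keeps everything in integer math: the raw counter is multiplied by a scaled tick length (units) and divided back down (div). On most parts a tick is 1.28 us, so units = 128 and div = 100000 turn ticks into milliseconds; on Valleyview the tick length is derived from the CZ clock, with bias = 100 keeping the intermediate values exact. A minimal sketch of the same conversion (DIV_ROUND_UP_ULL reimplemented locally; czcount_30ns plays the role of the VLV_CLK_CTL2 field read above, and the VLV_COUNT_RANGE_HIGH scaling is left out for brevity):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_ULL(x, y) (((x) + (y) - 1) / (y))

static uint64_t residency_ms(uint64_t ticks, uint64_t czcount_30ns)
{
	uint64_t bias = 100, units, div;

	if (czcount_30ns) {			/* VLV-style CZ clock */
		units = DIV_ROUND_UP_ULL(30 * bias, czcount_30ns);
		div = 1000000 * bias;		/* ns -> ms, bias cancels */
	} else {				/* classic 1.28 us tick */
		units = 128;
		div = 100000;
	}
	return DIV_ROUND_UP_ULL(ticks * units, div);
}

int main(void)
{
	/* 1,000,000 ticks of 1.28 us each is 1280 ms */
	printf("%llu ms\n", (unsigned long long)residency_ms(1000000, 0));
	return 0;
}
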
@@ -63,16 +83,20 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
63static ssize_t 83static ssize_t
64show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf) 84show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
65{ 85{
66 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 86 struct drm_minor *dminor = dev_to_drm_minor(kdev);
67 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); 87 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
88 if (IS_VALLEYVIEW(dminor->dev))
89 rc6p_residency = 0;
68 return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency); 90 return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
69} 91}
70 92
71static ssize_t 93static ssize_t
72show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf) 94show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
73{ 95{
74 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 96 struct drm_minor *dminor = dev_to_drm_minor(kdev);
75 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); 97 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
98 if (IS_VALLEYVIEW(dminor->dev))
99 rc6pp_residency = 0;
76 return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency); 100 return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
77} 101}
78 102
@@ -97,7 +121,7 @@ static struct attribute_group rc6_attr_group = {
97 121
98static int l3_access_valid(struct drm_device *dev, loff_t offset) 122static int l3_access_valid(struct drm_device *dev, loff_t offset)
99{ 123{
100 if (!HAS_L3_GPU_CACHE(dev)) 124 if (!HAS_L3_DPF(dev))
101 return -EPERM; 125 return -EPERM;
102 126
103 if (offset % 4 != 0) 127 if (offset % 4 != 0)
@@ -115,31 +139,34 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
115 loff_t offset, size_t count) 139 loff_t offset, size_t count)
116{ 140{
117 struct device *dev = container_of(kobj, struct device, kobj); 141 struct device *dev = container_of(kobj, struct device, kobj);
118 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 142 struct drm_minor *dminor = dev_to_drm_minor(dev);
119 struct drm_device *drm_dev = dminor->dev; 143 struct drm_device *drm_dev = dminor->dev;
120 struct drm_i915_private *dev_priv = drm_dev->dev_private; 144 struct drm_i915_private *dev_priv = drm_dev->dev_private;
121 uint32_t misccpctl; 145 int slice = (int)(uintptr_t)attr->private;
122 int i, ret; 146 int ret;
147
148 count = round_down(count, 4);
123 149
124 ret = l3_access_valid(drm_dev, offset); 150 ret = l3_access_valid(drm_dev, offset);
125 if (ret) 151 if (ret)
126 return ret; 152 return ret;
127 153
154 count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
155
128 ret = i915_mutex_lock_interruptible(drm_dev); 156 ret = i915_mutex_lock_interruptible(drm_dev);
129 if (ret) 157 if (ret)
130 return ret; 158 return ret;
131 159
132 misccpctl = I915_READ(GEN7_MISCCPCTL); 160 if (dev_priv->l3_parity.remap_info[slice])
133 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 161 memcpy(buf,
134 162 dev_priv->l3_parity.remap_info[slice] + (offset/4),
135 for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4) 163 count);
136 *((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i); 164 else
137 165 memset(buf, 0, count);
138 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
139 166
140 mutex_unlock(&drm_dev->struct_mutex); 167 mutex_unlock(&drm_dev->struct_mutex);
141 168
142 return i - offset; 169 return count;
143} 170}
144 171
145static ssize_t 172static ssize_t
@@ -148,21 +175,26 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
148 loff_t offset, size_t count) 175 loff_t offset, size_t count)
149{ 176{
150 struct device *dev = container_of(kobj, struct device, kobj); 177 struct device *dev = container_of(kobj, struct device, kobj);
151 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 178 struct drm_minor *dminor = dev_to_drm_minor(dev);
152 struct drm_device *drm_dev = dminor->dev; 179 struct drm_device *drm_dev = dminor->dev;
153 struct drm_i915_private *dev_priv = drm_dev->dev_private; 180 struct drm_i915_private *dev_priv = drm_dev->dev_private;
181 struct i915_hw_context *ctx;
154 u32 *temp = NULL; /* Just here to make handling failures easy */ 182 u32 *temp = NULL; /* Just here to make handling failures easy */
183 int slice = (int)(uintptr_t)attr->private;
155 int ret; 184 int ret;
156 185
157 ret = l3_access_valid(drm_dev, offset); 186 ret = l3_access_valid(drm_dev, offset);
158 if (ret) 187 if (ret)
159 return ret; 188 return ret;
160 189
190 if (dev_priv->hw_contexts_disabled)
191 return -ENXIO;
192
161 ret = i915_mutex_lock_interruptible(drm_dev); 193 ret = i915_mutex_lock_interruptible(drm_dev);
162 if (ret) 194 if (ret)
163 return ret; 195 return ret;
164 196
165 if (!dev_priv->l3_parity.remap_info) { 197 if (!dev_priv->l3_parity.remap_info[slice]) {
166 temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); 198 temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
167 if (!temp) { 199 if (!temp) {
168 mutex_unlock(&drm_dev->struct_mutex); 200 mutex_unlock(&drm_dev->struct_mutex);
@@ -182,13 +214,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
182 * at this point it is left as a TODO. 214 * at this point it is left as a TODO.
183 */ 215 */
184 if (temp) 216 if (temp)
185 dev_priv->l3_parity.remap_info = temp; 217 dev_priv->l3_parity.remap_info[slice] = temp;
186 218
187 memcpy(dev_priv->l3_parity.remap_info + (offset/4), 219 memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
188 buf + (offset/4),
189 count);
190 220
191 i915_gem_l3_remap(drm_dev); 221 /* NB: We defer the remapping until we switch to the context */
222 list_for_each_entry(ctx, &dev_priv->context_list, link)
223 ctx->remap_slice |= (1<<slice);
192 224
193 mutex_unlock(&drm_dev->struct_mutex); 225 mutex_unlock(&drm_dev->struct_mutex);
194 226
@@ -200,17 +232,29 @@ static struct bin_attribute dpf_attrs = {
200 .size = GEN7_L3LOG_SIZE, 232 .size = GEN7_L3LOG_SIZE,
201 .read = i915_l3_read, 233 .read = i915_l3_read,
202 .write = i915_l3_write, 234 .write = i915_l3_write,
203 .mmap = NULL 235 .mmap = NULL,
236 .private = (void *)0
237};
238
239static struct bin_attribute dpf_attrs_1 = {
240 .attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
241 .size = GEN7_L3LOG_SIZE,
242 .read = i915_l3_read,
243 .write = i915_l3_write,
244 .mmap = NULL,
245 .private = (void *)1
204}; 246};
205 247
206static ssize_t gt_cur_freq_mhz_show(struct device *kdev, 248static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
207 struct device_attribute *attr, char *buf) 249 struct device_attribute *attr, char *buf)
208{ 250{
209 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 251 struct drm_minor *minor = dev_to_drm_minor(kdev);
210 struct drm_device *dev = minor->dev; 252 struct drm_device *dev = minor->dev;
211 struct drm_i915_private *dev_priv = dev->dev_private; 253 struct drm_i915_private *dev_priv = dev->dev_private;
212 int ret; 254 int ret;
213 255
256 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
257
214 mutex_lock(&dev_priv->rps.hw_lock); 258 mutex_lock(&dev_priv->rps.hw_lock);
215 if (IS_VALLEYVIEW(dev_priv->dev)) { 259 if (IS_VALLEYVIEW(dev_priv->dev)) {
216 u32 freq; 260 u32 freq;
@@ -227,7 +271,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
227static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, 271static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
228 struct device_attribute *attr, char *buf) 272 struct device_attribute *attr, char *buf)
229{ 273{
230 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 274 struct drm_minor *minor = dev_to_drm_minor(kdev);
231 struct drm_device *dev = minor->dev; 275 struct drm_device *dev = minor->dev;
232 struct drm_i915_private *dev_priv = dev->dev_private; 276 struct drm_i915_private *dev_priv = dev->dev_private;
233 277
@@ -238,11 +282,13 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
238 282
239static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) 283static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
240{ 284{
241 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 285 struct drm_minor *minor = dev_to_drm_minor(kdev);
242 struct drm_device *dev = minor->dev; 286 struct drm_device *dev = minor->dev;
243 struct drm_i915_private *dev_priv = dev->dev_private; 287 struct drm_i915_private *dev_priv = dev->dev_private;
244 int ret; 288 int ret;
245 289
290 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
291
246 mutex_lock(&dev_priv->rps.hw_lock); 292 mutex_lock(&dev_priv->rps.hw_lock);
247 if (IS_VALLEYVIEW(dev_priv->dev)) 293 if (IS_VALLEYVIEW(dev_priv->dev))
248 ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay); 294 ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
@@ -257,7 +303,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
257 struct device_attribute *attr, 303 struct device_attribute *attr,
258 const char *buf, size_t count) 304 const char *buf, size_t count)
259{ 305{
260 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 306 struct drm_minor *minor = dev_to_drm_minor(kdev);
261 struct drm_device *dev = minor->dev; 307 struct drm_device *dev = minor->dev;
262 struct drm_i915_private *dev_priv = dev->dev_private; 308 struct drm_i915_private *dev_priv = dev->dev_private;
263 u32 val, rp_state_cap, hw_max, hw_min, non_oc_max; 309 u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
@@ -267,6 +313,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
267 if (ret) 313 if (ret)
268 return ret; 314 return ret;
269 315
316 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
317
270 mutex_lock(&dev_priv->rps.hw_lock); 318 mutex_lock(&dev_priv->rps.hw_lock);
271 319
272 if (IS_VALLEYVIEW(dev_priv->dev)) { 320 if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -310,11 +358,13 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
310 358
311static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) 359static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
312{ 360{
313 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 361 struct drm_minor *minor = dev_to_drm_minor(kdev);
314 struct drm_device *dev = minor->dev; 362 struct drm_device *dev = minor->dev;
315 struct drm_i915_private *dev_priv = dev->dev_private; 363 struct drm_i915_private *dev_priv = dev->dev_private;
316 int ret; 364 int ret;
317 365
366 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
367
318 mutex_lock(&dev_priv->rps.hw_lock); 368 mutex_lock(&dev_priv->rps.hw_lock);
319 if (IS_VALLEYVIEW(dev_priv->dev)) 369 if (IS_VALLEYVIEW(dev_priv->dev))
320 ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay); 370 ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
@@ -329,7 +379,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
329 struct device_attribute *attr, 379 struct device_attribute *attr,
330 const char *buf, size_t count) 380 const char *buf, size_t count)
331{ 381{
332 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 382 struct drm_minor *minor = dev_to_drm_minor(kdev);
333 struct drm_device *dev = minor->dev; 383 struct drm_device *dev = minor->dev;
334 struct drm_i915_private *dev_priv = dev->dev_private; 384 struct drm_i915_private *dev_priv = dev->dev_private;
335 u32 val, rp_state_cap, hw_max, hw_min; 385 u32 val, rp_state_cap, hw_max, hw_min;
@@ -339,6 +389,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
339 if (ret) 389 if (ret)
340 return ret; 390 return ret;
341 391
392 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
393
342 mutex_lock(&dev_priv->rps.hw_lock); 394 mutex_lock(&dev_priv->rps.hw_lock);
343 395
344 if (IS_VALLEYVIEW(dev)) { 396 if (IS_VALLEYVIEW(dev)) {
@@ -388,7 +440,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
388/* For now we have a static number of RP states */ 440/* For now we have a static number of RP states */
389static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) 441static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
390{ 442{
391 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 443 struct drm_minor *minor = dev_to_drm_minor(kdev);
392 struct drm_device *dev = minor->dev; 444 struct drm_device *dev = minor->dev;
393 struct drm_i915_private *dev_priv = dev->dev_private; 445 struct drm_i915_private *dev_priv = dev->dev_private;
394 u32 val, rp_state_cap; 446 u32 val, rp_state_cap;
@@ -436,7 +488,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
436{ 488{
437 489
438 struct device *kdev = container_of(kobj, struct device, kobj); 490 struct device *kdev = container_of(kobj, struct device, kobj);
439 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 491 struct drm_minor *minor = dev_to_drm_minor(kdev);
440 struct drm_device *dev = minor->dev; 492 struct drm_device *dev = minor->dev;
441 struct i915_error_state_file_priv error_priv; 493 struct i915_error_state_file_priv error_priv;
442 struct drm_i915_error_state_buf error_str; 494 struct drm_i915_error_state_buf error_str;
@@ -471,7 +523,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
471 loff_t off, size_t count) 523 loff_t off, size_t count)
472{ 524{
473 struct device *kdev = container_of(kobj, struct device, kobj); 525 struct device *kdev = container_of(kobj, struct device, kobj);
474 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 526 struct drm_minor *minor = dev_to_drm_minor(kdev);
475 struct drm_device *dev = minor->dev; 527 struct drm_device *dev = minor->dev;
476 int ret; 528 int ret;
477 529
@@ -501,27 +553,34 @@ void i915_setup_sysfs(struct drm_device *dev)
501 553
502#ifdef CONFIG_PM 554#ifdef CONFIG_PM
503 if (INTEL_INFO(dev)->gen >= 6) { 555 if (INTEL_INFO(dev)->gen >= 6) {
504 ret = sysfs_merge_group(&dev->primary->kdev.kobj, 556 ret = sysfs_merge_group(&dev->primary->kdev->kobj,
505 &rc6_attr_group); 557 &rc6_attr_group);
506 if (ret) 558 if (ret)
507 DRM_ERROR("RC6 residency sysfs setup failed\n"); 559 DRM_ERROR("RC6 residency sysfs setup failed\n");
508 } 560 }
509#endif 561#endif
510 if (HAS_L3_GPU_CACHE(dev)) { 562 if (HAS_L3_DPF(dev)) {
511 ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); 563 ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
512 if (ret) 564 if (ret)
513 DRM_ERROR("l3 parity sysfs setup failed\n"); 565 DRM_ERROR("l3 parity sysfs setup failed\n");
566
567 if (NUM_L3_SLICES(dev) > 1) {
568 ret = device_create_bin_file(dev->primary->kdev,
569 &dpf_attrs_1);
570 if (ret)
571 DRM_ERROR("l3 parity slice 1 setup failed\n");
572 }
514 } 573 }
515 574
516 ret = 0; 575 ret = 0;
517 if (IS_VALLEYVIEW(dev)) 576 if (IS_VALLEYVIEW(dev))
518 ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs); 577 ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
519 else if (INTEL_INFO(dev)->gen >= 6) 578 else if (INTEL_INFO(dev)->gen >= 6)
520 ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs); 579 ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
521 if (ret) 580 if (ret)
522 DRM_ERROR("RPS sysfs setup failed\n"); 581 DRM_ERROR("RPS sysfs setup failed\n");
523 582
524 ret = sysfs_create_bin_file(&dev->primary->kdev.kobj, 583 ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
525 &error_state_attr); 584 &error_state_attr);
526 if (ret) 585 if (ret)
527 DRM_ERROR("error_state sysfs setup failed\n"); 586 DRM_ERROR("error_state sysfs setup failed\n");
@@ -529,13 +588,14 @@ void i915_setup_sysfs(struct drm_device *dev)
529 588
530void i915_teardown_sysfs(struct drm_device *dev) 589void i915_teardown_sysfs(struct drm_device *dev)
531{ 590{
532 sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr); 591 sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
533 if (IS_VALLEYVIEW(dev)) 592 if (IS_VALLEYVIEW(dev))
534 sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs); 593 sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
535 else 594 else
536 sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); 595 sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
537 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); 596 device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
597 device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
538#ifdef CONFIG_PM 598#ifdef CONFIG_PM
539 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); 599 sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
540#endif 600#endif
541} 601}
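
One detail of the hunks above that is easy to miss: dpf_attrs and dpf_attrs_1 share a single pair of read/write handlers, and the slice number travels in the bin_attribute's .private pointer, recovered in i915_l3_read()/i915_l3_write() by an integer cast. A userspace-sized sketch of that pattern (the struct below is a stand-in, not the kernel's struct bin_attribute):

#include <stdint.h>
#include <stdio.h>

struct bin_attr {
	const char *name;
	void *private;		/* smuggles a small integer, not a pointer */
};

static void handler(struct bin_attr *attr)
{
	int slice = (int)(uintptr_t)attr->private;

	printf("%s -> slice %d\n", attr->name, slice);
}

int main(void)
{
	struct bin_attr slice0 = { "l3_parity", (void *)0 };
	struct bin_attr slice1 = { "l3_parity_slice_1", (void *)1 };

	handler(&slice0);	/* slice 0 */
	handler(&slice1);	/* slice 1, same handler */
	return 0;
}
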
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index e2c5ee6f6194..6e580c98dede 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -233,6 +233,47 @@ TRACE_EVENT(i915_gem_evict_everything,
233 TP_printk("dev=%d", __entry->dev) 233 TP_printk("dev=%d", __entry->dev)
234); 234);
235 235
236TRACE_EVENT(i915_gem_evict_vm,
237 TP_PROTO(struct i915_address_space *vm),
238 TP_ARGS(vm),
239
240 TP_STRUCT__entry(
241 __field(struct i915_address_space *, vm)
242 ),
243
244 TP_fast_assign(
245 __entry->vm = vm;
246 ),
247
248 TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
249);
250
251TRACE_EVENT(i915_gem_ring_sync_to,
252 TP_PROTO(struct intel_ring_buffer *from,
253 struct intel_ring_buffer *to,
254 u32 seqno),
255 TP_ARGS(from, to, seqno),
256
257 TP_STRUCT__entry(
258 __field(u32, dev)
259 __field(u32, sync_from)
260 __field(u32, sync_to)
261 __field(u32, seqno)
262 ),
263
264 TP_fast_assign(
265 __entry->dev = from->dev->primary->index;
266 __entry->sync_from = from->id;
267 __entry->sync_to = to->id;
268 __entry->seqno = seqno;
269 ),
270
271 TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
272 __entry->dev,
273 __entry->sync_from, __entry->sync_to,
274 __entry->seqno)
275);
276
236TRACE_EVENT(i915_gem_ring_dispatch, 277TRACE_EVENT(i915_gem_ring_dispatch,
237 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags), 278 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
238 TP_ARGS(ring, seqno, flags), 279 TP_ARGS(ring, seqno, flags),
@@ -304,9 +345,24 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
304 TP_ARGS(ring, seqno) 345 TP_ARGS(ring, seqno)
305); 346);
306 347
307DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, 348TRACE_EVENT(i915_gem_request_complete,
308 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), 349 TP_PROTO(struct intel_ring_buffer *ring),
309 TP_ARGS(ring, seqno) 350 TP_ARGS(ring),
351
352 TP_STRUCT__entry(
353 __field(u32, dev)
354 __field(u32, ring)
355 __field(u32, seqno)
356 ),
357
358 TP_fast_assign(
359 __entry->dev = ring->dev->primary->index;
360 __entry->ring = ring->id;
361 __entry->seqno = ring->get_seqno(ring, false);
362 ),
363
364 TP_printk("dev=%u, ring=%u, seqno=%u",
365 __entry->dev, __entry->ring, __entry->seqno)
310); 366);
311 367
312DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, 368DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
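
i915_gem_request_complete is promoted from a DEFINE_EVENT to a full TRACE_EVENT so that TP_fast_assign can sample the seqno itself, via ring->get_seqno(), at the moment the event fires instead of trusting a caller-supplied value. A plain-C sketch of that capture-at-fire pattern (the function pointer stands in for the ring vtable slot):

#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t (*get_seqno)(struct ring *r, int lazy);
	uint32_t hw_seqno;
	uint32_t id;
};

static uint32_t ring_get_seqno(struct ring *r, int lazy)
{
	(void)lazy;
	return r->hw_seqno;	/* the driver reads this from hardware */
}

/* Fields are captured when the trace fires, like TP_fast_assign above. */
static void trace_request_complete(struct ring *r)
{
	printf("ring=%u seqno=%u\n", (unsigned)r->id,
	       (unsigned)r->get_seqno(r, 0));
}

int main(void)
{
	struct ring rcs = { ring_get_seqno, 42, 0 };

	trace_request_complete(&rcs);
	return 0;
}
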
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 53f2bed8bc5f..6dd622d733b9 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -389,7 +389,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
389{ 389{
390 struct sdvo_device_mapping *p_mapping; 390 struct sdvo_device_mapping *p_mapping;
391 struct bdb_general_definitions *p_defs; 391 struct bdb_general_definitions *p_defs;
392 struct child_device_config *p_child; 392 union child_device_config *p_child;
393 int i, child_device_num, count; 393 int i, child_device_num, count;
394 u16 block_size; 394 u16 block_size;
395 395
@@ -416,36 +416,36 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
416 count = 0; 416 count = 0;
417 for (i = 0; i < child_device_num; i++) { 417 for (i = 0; i < child_device_num; i++) {
418 p_child = &(p_defs->devices[i]); 418 p_child = &(p_defs->devices[i]);
419 if (!p_child->device_type) { 419 if (!p_child->old.device_type) {
420 /* skip the device block if device type is invalid */ 420 /* skip the device block if device type is invalid */
421 continue; 421 continue;
422 } 422 }
423 if (p_child->slave_addr != SLAVE_ADDR1 && 423 if (p_child->old.slave_addr != SLAVE_ADDR1 &&
424 p_child->slave_addr != SLAVE_ADDR2) { 424 p_child->old.slave_addr != SLAVE_ADDR2) {
425 /* 425 /*
426 * If the slave address is neither 0x70 nor 0x72, 426 * If the slave address is neither 0x70 nor 0x72,
427 * it is not an SDVO device. Skip it. 427 * it is not an SDVO device. Skip it.
428 */ 428 */
429 continue; 429 continue;
430 } 430 }
431 if (p_child->dvo_port != DEVICE_PORT_DVOB && 431 if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
432 p_child->dvo_port != DEVICE_PORT_DVOC) { 432 p_child->old.dvo_port != DEVICE_PORT_DVOC) {
433 /* skip the incorrect SDVO port */ 433 /* skip the incorrect SDVO port */
434 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n"); 434 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
435 continue; 435 continue;
436 } 436 }
437 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" 437 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
438 " %s port\n", 438 " %s port\n",
439 p_child->slave_addr, 439 p_child->old.slave_addr,
440 (p_child->dvo_port == DEVICE_PORT_DVOB) ? 440 (p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
441 "SDVOB" : "SDVOC"); 441 "SDVOB" : "SDVOC");
442 p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]); 442 p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
443 if (!p_mapping->initialized) { 443 if (!p_mapping->initialized) {
444 p_mapping->dvo_port = p_child->dvo_port; 444 p_mapping->dvo_port = p_child->old.dvo_port;
445 p_mapping->slave_addr = p_child->slave_addr; 445 p_mapping->slave_addr = p_child->old.slave_addr;
446 p_mapping->dvo_wiring = p_child->dvo_wiring; 446 p_mapping->dvo_wiring = p_child->old.dvo_wiring;
447 p_mapping->ddc_pin = p_child->ddc_pin; 447 p_mapping->ddc_pin = p_child->old.ddc_pin;
448 p_mapping->i2c_pin = p_child->i2c_pin; 448 p_mapping->i2c_pin = p_child->old.i2c_pin;
449 p_mapping->initialized = 1; 449 p_mapping->initialized = 1;
450 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", 450 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
451 p_mapping->dvo_port, 451 p_mapping->dvo_port,
@@ -457,7 +457,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
457 DRM_DEBUG_KMS("Maybe one SDVO port is shared by " 457 DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
458 "two SDVO device.\n"); 458 "two SDVO device.\n");
459 } 459 }
460 if (p_child->slave2_addr) { 460 if (p_child->old.slave2_addr) {
461 /* Maybe this is a SDVO device with multiple inputs */ 461 /* Maybe this is a SDVO device with multiple inputs */
462 /* And the mapping info is not added */ 462 /* And the mapping info is not added */
463 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this" 463 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -477,15 +477,13 @@ static void
477parse_driver_features(struct drm_i915_private *dev_priv, 477parse_driver_features(struct drm_i915_private *dev_priv,
478 struct bdb_header *bdb) 478 struct bdb_header *bdb)
479{ 479{
480 struct drm_device *dev = dev_priv->dev;
481 struct bdb_driver_features *driver; 480 struct bdb_driver_features *driver;
482 481
483 driver = find_section(bdb, BDB_DRIVER_FEATURES); 482 driver = find_section(bdb, BDB_DRIVER_FEATURES);
484 if (!driver) 483 if (!driver)
485 return; 484 return;
486 485
487 if (SUPPORTS_EDP(dev) && 486 if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
488 driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
489 dev_priv->vbt.edp_support = 1; 487 dev_priv->vbt.edp_support = 1;
490 488
491 if (driver->dual_frequency) 489 if (driver->dual_frequency)
@@ -501,7 +499,7 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
501 499
502 edp = find_section(bdb, BDB_EDP); 500 edp = find_section(bdb, BDB_EDP);
503 if (!edp) { 501 if (!edp) {
504 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support) 502 if (dev_priv->vbt.edp_support)
505 DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n"); 503 DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
506 return; 504 return;
507 } 505 }
@@ -569,11 +567,149 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
569} 567}
570 568
571static void 569static void
570parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
571{
572 struct bdb_mipi *mipi;
573
574 mipi = find_section(bdb, BDB_MIPI);
575 if (!mipi) {
576 DRM_DEBUG_KMS("No MIPI BDB found");
577 return;
578 }
579
580 /* XXX: add more info */
581 dev_priv->vbt.dsi.panel_id = mipi->panel_id;
582}
583
584static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
585 struct bdb_header *bdb)
586{
587 union child_device_config *it, *child = NULL;
588 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
589 uint8_t hdmi_level_shift;
590 int i, j;
591 bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
592 uint8_t aux_channel;
593 /* Each DDI port can have more than one value on the "DVO Port" field,
594 * so look for all the possible values for each port and abort if more
595 * than one is found. */
596 int dvo_ports[][2] = {
597 {DVO_PORT_HDMIA, DVO_PORT_DPA},
598 {DVO_PORT_HDMIB, DVO_PORT_DPB},
599 {DVO_PORT_HDMIC, DVO_PORT_DPC},
600 {DVO_PORT_HDMID, DVO_PORT_DPD},
601 {DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
602 };
603
604 /* Find the child device to use, abort if more than one found. */
605 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
606 it = dev_priv->vbt.child_dev + i;
607
608 for (j = 0; j < 2; j++) {
609 if (dvo_ports[port][j] == -1)
610 break;
611
612 if (it->common.dvo_port == dvo_ports[port][j]) {
613 if (child) {
614 DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
615 port_name(port));
616 return;
617 }
618 child = it;
619 }
620 }
621 }
622 if (!child)
623 return;
624
625 aux_channel = child->raw[25];
626
627 is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
628 is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
629 is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
630 is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
631 is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
632
633 info->supports_dvi = is_dvi;
634 info->supports_hdmi = is_hdmi;
635 info->supports_dp = is_dp;
636
637 DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
638 port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
639
640 if (is_edp && is_dvi)
641 DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
642 port_name(port));
643 if (is_crt && port != PORT_E)
644 DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
645 if (is_crt && (is_dvi || is_dp))
646 DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
647 port_name(port));
648 if (is_dvi && (port == PORT_A || port == PORT_E))
649 DRM_DEBUG_KMS("Port %c is TMDS compabile\n", port_name(port));
650 if (!is_dvi && !is_dp && !is_crt)
651 DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
652 port_name(port));
653 if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
654 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
655
656 if (is_dvi) {
657 if (child->common.ddc_pin == 0x05 && port != PORT_B)
658 DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
659 if (child->common.ddc_pin == 0x04 && port != PORT_C)
660 DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
661 if (child->common.ddc_pin == 0x06 && port != PORT_D)
662 DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
663 }
664
665 if (is_dp) {
666 if (aux_channel == 0x40 && port != PORT_A)
667 DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
668 if (aux_channel == 0x10 && port != PORT_B)
669 DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
670 if (aux_channel == 0x20 && port != PORT_C)
671 DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
672 if (aux_channel == 0x30 && port != PORT_D)
673 DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
674 }
675
676 if (bdb->version >= 158) {
677 /* The VBT HDMI level shift values match the table we have. */
678 hdmi_level_shift = child->raw[7] & 0xF;
679 if (hdmi_level_shift < 0xC) {
680 DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
681 port_name(port),
682 hdmi_level_shift);
683 info->hdmi_level_shift = hdmi_level_shift;
684 }
685 }
686}
687
688static void parse_ddi_ports(struct drm_i915_private *dev_priv,
689 struct bdb_header *bdb)
690{
691 struct drm_device *dev = dev_priv->dev;
692 enum port port;
693
694 if (!HAS_DDI(dev))
695 return;
696
697 if (!dev_priv->vbt.child_dev_num)
698 return;
699
700 if (bdb->version < 155)
701 return;
702
703 for (port = PORT_A; port < I915_MAX_PORTS; port++)
704 parse_ddi_port(dev_priv, port, bdb);
705}
706
707static void
572parse_device_mapping(struct drm_i915_private *dev_priv, 708parse_device_mapping(struct drm_i915_private *dev_priv,
573 struct bdb_header *bdb) 709 struct bdb_header *bdb)
574{ 710{
575 struct bdb_general_definitions *p_defs; 711 struct bdb_general_definitions *p_defs;
576 struct child_device_config *p_child, *child_dev_ptr; 712 union child_device_config *p_child, *child_dev_ptr;
577 int i, child_device_num, count; 713 int i, child_device_num, count;
578 u16 block_size; 714 u16 block_size;
579 715
@@ -601,7 +737,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
601 /* get the number of child device that is present */ 737 /* get the number of child device that is present */
602 for (i = 0; i < child_device_num; i++) { 738 for (i = 0; i < child_device_num; i++) {
603 p_child = &(p_defs->devices[i]); 739 p_child = &(p_defs->devices[i]);
604 if (!p_child->device_type) { 740 if (!p_child->common.device_type) {
605 /* skip the device block if device type is invalid */ 741 /* skip the device block if device type is invalid */
606 continue; 742 continue;
607 } 743 }
@@ -621,7 +757,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
621 count = 0; 757 count = 0;
622 for (i = 0; i < child_device_num; i++) { 758 for (i = 0; i < child_device_num; i++) {
623 p_child = &(p_defs->devices[i]); 759 p_child = &(p_defs->devices[i]);
624 if (!p_child->device_type) { 760 if (!p_child->common.device_type) {
625 /* skip the device block if device type is invalid */ 761 /* skip the device block if device type is invalid */
626 continue; 762 continue;
627 } 763 }
@@ -637,6 +773,7 @@ static void
637init_vbt_defaults(struct drm_i915_private *dev_priv) 773init_vbt_defaults(struct drm_i915_private *dev_priv)
638{ 774{
639 struct drm_device *dev = dev_priv->dev; 775 struct drm_device *dev = dev_priv->dev;
776 enum port port;
640 777
641 dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC; 778 dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
642 779
@@ -655,6 +792,18 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
655 dev_priv->vbt.lvds_use_ssc = 1; 792 dev_priv->vbt.lvds_use_ssc = 1;
656 dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); 793 dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
657 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq); 794 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
795
796 for (port = PORT_A; port < I915_MAX_PORTS; port++) {
797 struct ddi_vbt_port_info *info =
798 &dev_priv->vbt.ddi_port_info[port];
799
800 /* Recommended BSpec default: 800mV 0dB. */
801 info->hdmi_level_shift = 6;
802
803 info->supports_dvi = (port != PORT_A && port != PORT_E);
804 info->supports_hdmi = info->supports_dvi;
805 info->supports_dp = (port != PORT_E);
806 }
658} 807}
659 808
660static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) 809static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -745,6 +894,8 @@ intel_parse_bios(struct drm_device *dev)
745 parse_device_mapping(dev_priv, bdb); 894 parse_device_mapping(dev_priv, bdb);
746 parse_driver_features(dev_priv, bdb); 895 parse_driver_features(dev_priv, bdb);
747 parse_edp(dev_priv, bdb); 896 parse_edp(dev_priv, bdb);
897 parse_mipi(dev_priv, bdb);
898 parse_ddi_ports(dev_priv, bdb);
748 899
749 if (bios) 900 if (bios)
750 pci_unmap_rom(pdev, bios); 901 pci_unmap_rom(pdev, bios);
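
The parsing changes above all hinge on the new union aliasing the same 33 bytes three ways. A compilable sketch of why the common view is safe: the packed layout pins dvo_port at byte 16 regardless of BDB version, while version-dependent bytes such as the AUX channel (read as raw[25] in parse_ddi_port()) stay reachable through the raw array. Only the leading fields are reproduced here:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct common_cfg {			/* offsets stable across versions */
	uint16_t handle;		/* bytes 0..1  */
	uint16_t device_type;		/* bytes 2..3  */
	uint8_t  not_common1[12];	/* bytes 4..15 */
	uint8_t  dvo_port;		/* byte 16     */
} __attribute__((packed));

union child_cfg {
	uint8_t raw[33];
	struct common_cfg common;
};

int main(void)
{
	union child_cfg c;

	memset(&c, 0, sizeof(c));
	c.raw[16] = 7;		/* lands in common.dvo_port */
	c.raw[25] = 0x40;	/* version-dependent byte: AUX channel */
	printf("dvo_port=%d aux=0x%02x\n", c.common.dvo_port, c.raw[25]);
	return 0;
}
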
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index e088d6f0956a..f580a2b0ddd3 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -104,6 +104,7 @@ struct vbios_data {
104#define BDB_LVDS_LFP_DATA 42 104#define BDB_LVDS_LFP_DATA 42
105#define BDB_LVDS_BACKLIGHT 43 105#define BDB_LVDS_BACKLIGHT 43
106#define BDB_LVDS_POWER 44 106#define BDB_LVDS_POWER 44
107#define BDB_MIPI 50
107#define BDB_SKIP 254 /* VBIOS private block, ignore */ 108#define BDB_SKIP 254 /* VBIOS private block, ignore */
108 109
109struct bdb_general_features { 110struct bdb_general_features {
@@ -201,7 +202,10 @@ struct bdb_general_features {
201#define DEVICE_PORT_DVOB 0x01 202#define DEVICE_PORT_DVOB 0x01
202#define DEVICE_PORT_DVOC 0x02 203#define DEVICE_PORT_DVOC 0x02
203 204
204struct child_device_config { 205/* We used to keep this struct but without any version control. We should avoid
206 * using it in the future, but it should be safe to keep using it in the old
207 * code. */
208struct old_child_dev_config {
205 u16 handle; 209 u16 handle;
206 u16 device_type; 210 u16 device_type;
207 u8 device_id[10]; /* ascii string */ 211 u8 device_id[10]; /* ascii string */
@@ -223,6 +227,32 @@ struct child_device_config {
223 u8 dvo_function; 227 u8 dvo_function;
224} __attribute__((packed)); 228} __attribute__((packed));
225 229
230/* This one contains field offsets that are known to be common for all BDB
231 * versions. Notice that the meaning of the contents may still change,
232 * but at least the offsets are consistent. */
233struct common_child_dev_config {
234 u16 handle;
235 u16 device_type;
236 u8 not_common1[12];
237 u8 dvo_port;
238 u8 not_common2[2];
239 u8 ddc_pin;
240 u16 edid_ptr;
241} __attribute__((packed));
242
243/* This field changes depending on the BDB version, so the most reliable way to
244 * read it is by checking the BDB version and reading the raw pointer. */
245union child_device_config {
246 /* This one is safe to be used anywhere, but the code should still check
247 * the BDB version. */
248 u8 raw[33];
249 /* This one should only be kept for legacy code. */
250 struct old_child_dev_config old;
251 /* This one should also be safe to use anywhere, even without version
252 * checks. */
253 struct common_child_dev_config common;
254};
255
226struct bdb_general_definitions { 256struct bdb_general_definitions {
227 /* DDC GPIO */ 257 /* DDC GPIO */
228 u8 crt_ddc_gmbus_pin; 258 u8 crt_ddc_gmbus_pin;
@@ -248,7 +278,7 @@ struct bdb_general_definitions {
248 * number = (block_size - sizeof(bdb_general_definitions))/ 278 * number = (block_size - sizeof(bdb_general_definitions))/
249 * sizeof(child_device_config); 279 * sizeof(child_device_config);
250 */ 280 */
251 struct child_device_config devices[0]; 281 union child_device_config devices[0];
252} __attribute__((packed)); 282} __attribute__((packed));
253 283
254struct bdb_lvds_options { 284struct bdb_lvds_options {
@@ -608,6 +638,40 @@ int intel_parse_bios(struct drm_device *dev);
608#define DEVICE_TYPE_DP 0x68C6 638#define DEVICE_TYPE_DP 0x68C6
609#define DEVICE_TYPE_eDP 0x78C6 639#define DEVICE_TYPE_eDP 0x78C6
610 640
641#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
642#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
643#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
644#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
645#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
646#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
647#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
648#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
649#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
650#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
651#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
652#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
653#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
654#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
655#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
656
657/*
658 * Bits we care about when checking for DEVICE_TYPE_eDP
659 * Depending on the system, the other bits may or may not
660 * be set for eDP outputs.
661 */
662#define DEVICE_TYPE_eDP_BITS \
663 (DEVICE_TYPE_INTERNAL_CONNECTOR | \
664 DEVICE_TYPE_NOT_HDMI_OUTPUT | \
665 DEVICE_TYPE_MIPI_OUTPUT | \
666 DEVICE_TYPE_COMPOSITE_OUTPUT | \
667 DEVICE_TYPE_DUAL_CHANNEL | \
668 DEVICE_TYPE_LVDS_SINGALING | \
669 DEVICE_TYPE_TMDS_DVI_SIGNALING | \
670 DEVICE_TYPE_VIDEO_SIGNALING | \
671 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
672 DEVICE_TYPE_DIGITAL_OUTPUT | \
673 DEVICE_TYPE_ANALOG_OUTPUT)
674
611/* define the DVO port for HDMI output type */ 675/* define the DVO port for HDMI output type */
612#define DVO_B 1 676#define DVO_B 1
613#define DVO_C 2 677#define DVO_C 2
@@ -618,4 +682,57 @@ int intel_parse_bios(struct drm_device *dev);
618#define PORT_IDPC 8 682#define PORT_IDPC 8
619#define PORT_IDPD 9 683#define PORT_IDPD 9
620 684
685/* Possible values for the "DVO Port" field for versions >= 155: */
686#define DVO_PORT_HDMIA 0
687#define DVO_PORT_HDMIB 1
688#define DVO_PORT_HDMIC 2
689#define DVO_PORT_HDMID 3
690#define DVO_PORT_LVDS 4
691#define DVO_PORT_TV 5
692#define DVO_PORT_CRT 6
693#define DVO_PORT_DPB 7
694#define DVO_PORT_DPC 8
695#define DVO_PORT_DPD 9
696#define DVO_PORT_DPA 10
697
698/* MIPI DSI panel info */
699struct bdb_mipi {
700 u16 panel_id;
701 u16 bridge_revision;
702
703 /* General params */
704 u32 dithering:1;
705 u32 bpp_pixel_format:1;
706 u32 rsvd1:1;
707 u32 dphy_valid:1;
708 u32 resvd2:28;
709
710 u16 port_info;
711 u16 rsvd3:2;
712 u16 num_lanes:2;
713 u16 rsvd4:12;
714
715 /* DSI config */
716 u16 virt_ch_num:2;
717 u16 vtm:2;
718 u16 rsvd5:12;
719
720 u32 dsi_clock;
721 u32 bridge_ref_clk;
722 u16 rsvd_pwr;
723
724 /* Dphy Params */
725 u32 prepare_cnt:5;
726 u32 rsvd6:3;
727 u32 clk_zero_cnt:8;
728 u32 trail_cnt:5;
729 u32 rsvd7:3;
730 u32 exit_zero_cnt:6;
731 u32 rsvd8:2;
732
733 u32 hl_switch_cnt;
734 u32 lp_byte_clk;
735 u32 clk_lane_switch_cnt;
736} __attribute__((packed));
737
621#endif /* _I830_BIOS_H_ */ 738#endif /* _I830_BIOS_H_ */
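
DEVICE_TYPE_eDP_BITS is built for a masked comparison: keep only the bits that are reliable for eDP detection, then compare against DEVICE_TYPE_eDP under the same mask, so that don't-care bits (hotplug signaling, power management, class extension) cannot break the match. The comparison site is not part of this hunk, so the helper below is an assumed-usage sketch built from the definitions above:

#include <stdint.h>
#include <stdio.h>

#define DEVICE_TYPE_INTERNAL_CONNECTOR	(1 << 12)
#define DEVICE_TYPE_NOT_HDMI_OUTPUT	(1 << 11)
#define DEVICE_TYPE_MIPI_OUTPUT		(1 << 10)
#define DEVICE_TYPE_COMPOSITE_OUTPUT	(1 << 9)
#define DEVICE_TYPE_DUAL_CHANNEL	(1 << 8)
#define DEVICE_TYPE_LVDS_SINGALING	(1 << 5)
#define DEVICE_TYPE_TMDS_DVI_SIGNALING	(1 << 4)
#define DEVICE_TYPE_VIDEO_SIGNALING	(1 << 3)
#define DEVICE_TYPE_DISPLAYPORT_OUTPUT	(1 << 2)
#define DEVICE_TYPE_DIGITAL_OUTPUT	(1 << 1)
#define DEVICE_TYPE_ANALOG_OUTPUT	(1 << 0)

#define DEVICE_TYPE_DP	0x68C6
#define DEVICE_TYPE_eDP	0x78C6
#define DEVICE_TYPE_eDP_BITS \
	(DEVICE_TYPE_INTERNAL_CONNECTOR | DEVICE_TYPE_NOT_HDMI_OUTPUT | \
	 DEVICE_TYPE_MIPI_OUTPUT | DEVICE_TYPE_COMPOSITE_OUTPUT | \
	 DEVICE_TYPE_DUAL_CHANNEL | DEVICE_TYPE_LVDS_SINGALING | \
	 DEVICE_TYPE_TMDS_DVI_SIGNALING | DEVICE_TYPE_VIDEO_SIGNALING | \
	 DEVICE_TYPE_DISPLAYPORT_OUTPUT | DEVICE_TYPE_DIGITAL_OUTPUT | \
	 DEVICE_TYPE_ANALOG_OUTPUT)

static int is_edp(uint16_t device_type)
{
	return (device_type & DEVICE_TYPE_eDP_BITS) ==
	       (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
}

int main(void)
{
	/* eDP matches, eDP with a don't-care bit flipped still matches,
	 * plain DP does not (it lacks the internal-connector bit). */
	printf("%d %d %d\n", is_edp(DEVICE_TYPE_eDP),
	       is_edp(DEVICE_TYPE_eDP | (1 << 14)), is_edp(DEVICE_TYPE_DP));
	return 0;
}
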
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 10d1de5bce6f..b5b1b9b23adf 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -107,7 +107,17 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
107static void intel_crt_get_config(struct intel_encoder *encoder, 107static void intel_crt_get_config(struct intel_encoder *encoder,
108 struct intel_crtc_config *pipe_config) 108 struct intel_crtc_config *pipe_config)
109{ 109{
110 struct drm_device *dev = encoder->base.dev;
111 int dotclock;
112
110 pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder); 113 pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
114
115 dotclock = pipe_config->port_clock;
116
117 if (HAS_PCH_SPLIT(dev))
118 ironlake_check_encoder_dotclock(pipe_config, dotclock);
119
120 pipe_config->adjusted_mode.crtc_clock = dotclock;
111} 121}
112 122
113static void hsw_crt_get_config(struct intel_encoder *encoder, 123static void hsw_crt_get_config(struct intel_encoder *encoder,
@@ -264,7 +274,7 @@ static void intel_crt_mode_set(struct intel_encoder *encoder)
264 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; 274 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
265 u32 adpa; 275 u32 adpa;
266 276
267 if (HAS_PCH_SPLIT(dev)) 277 if (INTEL_INFO(dev)->gen >= 5)
268 adpa = ADPA_HOTPLUG_BITS; 278 adpa = ADPA_HOTPLUG_BITS;
269 else 279 else
270 adpa = 0; 280 adpa = 0;
@@ -366,9 +376,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
366 376
367 DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); 377 DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
368 378
369 /* FIXME: debug force function and remove */
370 ret = true;
371
372 return ret; 379 return ret;
373} 380}
374 381
@@ -670,7 +677,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
670 677
671static void intel_crt_destroy(struct drm_connector *connector) 678static void intel_crt_destroy(struct drm_connector *connector)
672{ 679{
673 drm_sysfs_connector_remove(connector);
674 drm_connector_cleanup(connector); 680 drm_connector_cleanup(connector);
675 kfree(connector); 681 kfree(connector);
676} 682}
@@ -776,7 +782,7 @@ void intel_crt_init(struct drm_device *dev)
776 if (!crt) 782 if (!crt)
777 return; 783 return;
778 784
779 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 785 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
780 if (!intel_connector) { 786 if (!intel_connector) {
781 kfree(crt); 787 kfree(crt);
782 return; 788 return;
@@ -816,16 +822,15 @@ void intel_crt_init(struct drm_device *dev)
816 crt->base.mode_set = intel_crt_mode_set; 822 crt->base.mode_set = intel_crt_mode_set;
817 crt->base.disable = intel_disable_crt; 823 crt->base.disable = intel_disable_crt;
818 crt->base.enable = intel_enable_crt; 824 crt->base.enable = intel_enable_crt;
819 if (IS_HASWELL(dev))
820 crt->base.get_config = hsw_crt_get_config;
821 else
822 crt->base.get_config = intel_crt_get_config;
823 if (I915_HAS_HOTPLUG(dev)) 825 if (I915_HAS_HOTPLUG(dev))
824 crt->base.hpd_pin = HPD_CRT; 826 crt->base.hpd_pin = HPD_CRT;
825 if (HAS_DDI(dev)) 827 if (HAS_DDI(dev)) {
828 crt->base.get_config = hsw_crt_get_config;
826 crt->base.get_hw_state = intel_ddi_get_hw_state; 829 crt->base.get_hw_state = intel_ddi_get_hw_state;
827 else 830 } else {
831 crt->base.get_config = intel_crt_get_config;
828 crt->base.get_hw_state = intel_crt_get_hw_state; 832 crt->base.get_hw_state = intel_crt_get_hw_state;
833 }
829 intel_connector->get_hw_state = intel_connector_get_hw_state; 834 intel_connector->get_hw_state = intel_connector_get_hw_state;
830 835
831 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 836 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b53fff84a7d5..1591576a6101 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -42,7 +42,6 @@ static const u32 hsw_ddi_translations_dp[] = {
42 0x80C30FFF, 0x000B0000, 42 0x80C30FFF, 0x000B0000,
43 0x00FFFFFF, 0x00040006, 43 0x00FFFFFF, 0x00040006,
44 0x80D75FFF, 0x000B0000, 44 0x80D75FFF, 0x000B0000,
45 0x00FFFFFF, 0x00040006 /* HDMI parameters */
46}; 45};
47 46
48static const u32 hsw_ddi_translations_fdi[] = { 47static const u32 hsw_ddi_translations_fdi[] = {
@@ -55,10 +54,64 @@ static const u32 hsw_ddi_translations_fdi[] = {
55 0x00C30FFF, 0x001E0000, 54 0x00C30FFF, 0x001E0000,
56 0x00FFFFFF, 0x00060006, 55 0x00FFFFFF, 0x00060006,
57 0x00D75FFF, 0x001E0000, 56 0x00D75FFF, 0x001E0000,
58 0x00FFFFFF, 0x00040006 /* HDMI parameters */
59}; 57};
60 58
61static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) 59static const u32 hsw_ddi_translations_hdmi[] = {
60 /* Idx NT mV diff T mV diff db */
61 0x00FFFFFF, 0x0006000E, /* 0: 400 400 0 */
62 0x00E79FFF, 0x000E000C, /* 1: 400 500 2 */
63 0x00D75FFF, 0x0005000A, /* 2: 400 600 3.5 */
64 0x00FFFFFF, 0x0005000A, /* 3: 600 600 0 */
65 0x00E79FFF, 0x001D0007, /* 4: 600 750 2 */
66 0x00D75FFF, 0x000C0004, /* 5: 600 900 3.5 */
67 0x00FFFFFF, 0x00040006, /* 6: 800 800 0 */
68 0x80E79FFF, 0x00030002, /* 7: 800 1000 2 */
69 0x00FFFFFF, 0x00140005, /* 8: 850 850 0 */
70 0x00FFFFFF, 0x000C0004, /* 9: 900 900 0 */
71 0x00FFFFFF, 0x001C0003, /* 10: 950 950 0 */
72 0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */
73};
74
75static const u32 bdw_ddi_translations_edp[] = {
76 0x00FFFFFF, 0x00000012, /* DP parameters */
77 0x00EBAFFF, 0x00020011,
78 0x00C71FFF, 0x0006000F,
79 0x00FFFFFF, 0x00020011,
80 0x00DB6FFF, 0x0005000F,
81 0x00BEEFFF, 0x000A000C,
82 0x00FFFFFF, 0x0005000F,
83 0x00DB6FFF, 0x000A000C,
84 0x00FFFFFF, 0x000A000C,
85 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB */
86};
87
88static const u32 bdw_ddi_translations_dp[] = {
89 0x00FFFFFF, 0x0007000E, /* DP parameters */
90 0x00D75FFF, 0x000E000A,
91 0x00BEFFFF, 0x00140006,
92 0x00FFFFFF, 0x000E000A,
93 0x00D75FFF, 0x00180004,
94 0x80CB2FFF, 0x001B0002,
95 0x00F7DFFF, 0x00180004,
96 0x80D75FFF, 0x001B0002,
97 0x80FFFFFF, 0x001B0002,
98 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB */
99};
100
101static const u32 bdw_ddi_translations_fdi[] = {
102 0x00FFFFFF, 0x0001000E, /* FDI parameters */
103 0x00D75FFF, 0x0004000A,
104 0x00C30FFF, 0x00070006,
105 0x00AAAFFF, 0x000C0000,
106 0x00FFFFFF, 0x0004000A,
107 0x00D75FFF, 0x00090004,
108 0x00C30FFF, 0x000C0000,
109 0x00FFFFFF, 0x00070006,
110 0x00D75FFF, 0x000C0000,
111 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB */
112};
113
114enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
62{ 115{
63 struct drm_encoder *encoder = &intel_encoder->base; 116 struct drm_encoder *encoder = &intel_encoder->base;
64 int type = intel_encoder->type; 117 int type = intel_encoder->type;
@@ -78,8 +131,9 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
78 } 131 }
79} 132}
80 133
81/* On Haswell, DDI port buffers must be programmed with correct values 134/*
82 * in advance. The buffer values are different for FDI and DP modes, 135 * Starting with Haswell, DDI port buffers must be programmed with correct
136 * values in advance. The buffer values are different for FDI and DP modes,
83 * but the HDMI/DVI fields are shared among those. So we program the DDI 137 * but the HDMI/DVI fields are shared among those. So we program the DDI
84 * in either FDI or DP modes only, as HDMI connections will work with both 138 * in either FDI or DP modes only, as HDMI connections will work with both
85 * of those 139 * of those
@@ -89,15 +143,58 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
89 struct drm_i915_private *dev_priv = dev->dev_private; 143 struct drm_i915_private *dev_priv = dev->dev_private;
90 u32 reg; 144 u32 reg;
91 int i; 145 int i;
92 const u32 *ddi_translations = (port == PORT_E) ? 146 int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
93 hsw_ddi_translations_fdi : 147 const u32 *ddi_translations_fdi;
94 hsw_ddi_translations_dp; 148 const u32 *ddi_translations_dp;
149 const u32 *ddi_translations_edp;
150 const u32 *ddi_translations;
151
152 if (IS_BROADWELL(dev)) {
153 ddi_translations_fdi = bdw_ddi_translations_fdi;
154 ddi_translations_dp = bdw_ddi_translations_dp;
155 ddi_translations_edp = bdw_ddi_translations_edp;
156 } else if (IS_HASWELL(dev)) {
157 ddi_translations_fdi = hsw_ddi_translations_fdi;
158 ddi_translations_dp = hsw_ddi_translations_dp;
159 ddi_translations_edp = hsw_ddi_translations_dp;
160 } else {
161 WARN(1, "ddi translation table missing\n");
162 ddi_translations_edp = bdw_ddi_translations_dp;
163 ddi_translations_fdi = bdw_ddi_translations_fdi;
164 ddi_translations_dp = bdw_ddi_translations_dp;
165 }
166
167 switch (port) {
168 case PORT_A:
169 ddi_translations = ddi_translations_edp;
170 break;
171 case PORT_B:
172 case PORT_C:
173 ddi_translations = ddi_translations_dp;
174 break;
175 case PORT_D:
176 if (intel_dpd_is_edp(dev))
177 ddi_translations = ddi_translations_edp;
178 else
179 ddi_translations = ddi_translations_dp;
180 break;
181 case PORT_E:
182 ddi_translations = ddi_translations_fdi;
183 break;
184 default:
185 BUG();
186 }
95 187
96 for (i = 0, reg = DDI_BUF_TRANS(port); 188 for (i = 0, reg = DDI_BUF_TRANS(port);
97 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { 189 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
98 I915_WRITE(reg, ddi_translations[i]); 190 I915_WRITE(reg, ddi_translations[i]);
99 reg += 4; 191 reg += 4;
100 } 192 }
193 /* Entry 9 is for HDMI: */
194 for (i = 0; i < 2; i++) {
195 I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
196 reg += 4;
197 }
101} 198}
102 199
103/* Program DDI buffers translations for DP. By default, program ports A-D in DP 200/* Program DDI buffers translations for DP. By default, program ports A-D in DP
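
After this hunk, intel_prepare_ddi_buffers() writes nine two-word (voltage swing / de-emphasis) entries from the mode-specific table, then one HDMI-specific pair chosen by the VBT level shift, which is why the HDMI words are indexed hdmi_level * 2 + i. A sketch of that layout with the register writes modelled as array stores (table contents are dummies apart from the quoted level-6 entry; the entry count is inferred from the 9-entry DP/FDI tables above):

#include <stdint.h>
#include <stdio.h>

#define NUM_DP_ENTRIES 9	/* DP/eDP/FDI tables: 9 entries, 2 words each */

static void prepare_ddi_buffers(uint32_t *regs,
				const uint32_t mode_tab[NUM_DP_ENTRIES * 2],
				uint32_t hdmi_tab[][2], int hdmi_level)
{
	int i, r = 0;

	for (i = 0; i < NUM_DP_ENTRIES * 2; i++)
		regs[r++] = mode_tab[i];	/* entries 0..8 */
	regs[r++] = hdmi_tab[hdmi_level][0];	/* entry 9: HDMI pair, */
	regs[r++] = hdmi_tab[hdmi_level][1];	/* picked by level shift */
}

int main(void)
{
	uint32_t regs[NUM_DP_ENTRIES * 2 + 2] = {0};
	const uint32_t dp_tab[NUM_DP_ENTRIES * 2] = {0};	/* dummies */
	uint32_t hdmi_tab[12][2] = {
		[6] = {0x00FFFFFF, 0x00040006},	/* 800 mV 0 dB, per table */
	};

	prepare_ddi_buffers(regs, dp_tab, hdmi_tab, 6);	/* VBT default: 6 */
	printf("HDMI words: %#x %#x\n", (unsigned)regs[18],
	       (unsigned)regs[19]);
	return 0;
}
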
@@ -296,9 +393,6 @@ static void intel_ddi_mode_set(struct intel_encoder *encoder)
296 DRM_DEBUG_DRIVER("DP audio: write eld information\n"); 393 DRM_DEBUG_DRIVER("DP audio: write eld information\n");
297 intel_write_eld(&encoder->base, adjusted_mode); 394 intel_write_eld(&encoder->base, adjusted_mode);
298 } 395 }
299
300 intel_dp_init_link_config(intel_dp);
301
302 } else if (type == INTEL_OUTPUT_HDMI) { 396 } else if (type == INTEL_OUTPUT_HDMI) {
303 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 397 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
304 398
@@ -739,7 +833,8 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
739 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 833 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
740 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); 834 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
741 struct drm_encoder *encoder = &intel_encoder->base; 835 struct drm_encoder *encoder = &intel_encoder->base;
742 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 836 struct drm_device *dev = crtc->dev;
837 struct drm_i915_private *dev_priv = dev->dev_private;
743 enum pipe pipe = intel_crtc->pipe; 838 enum pipe pipe = intel_crtc->pipe;
744 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 839 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
745 enum port port = intel_ddi_get_encoder_port(intel_encoder); 840 enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -767,18 +862,19 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
767 BUG(); 862 BUG();
768 } 863 }
769 864
770 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) 865 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
771 temp |= TRANS_DDI_PVSYNC; 866 temp |= TRANS_DDI_PVSYNC;
772 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 867 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
773 temp |= TRANS_DDI_PHSYNC; 868 temp |= TRANS_DDI_PHSYNC;
774 869
775 if (cpu_transcoder == TRANSCODER_EDP) { 870 if (cpu_transcoder == TRANSCODER_EDP) {
776 switch (pipe) { 871 switch (pipe) {
777 case PIPE_A: 872 case PIPE_A:
778 /* Can only use the always-on power well for eDP when 873 /* On Haswell, can only use the always-on power well for
779 * not using the panel fitter, and when not using motion 874 * eDP when not using the panel fitter, and when not
780 * blur mitigation (which we don't support). */ 875 * using motion blur mitigation (which we don't
781 if (intel_crtc->config.pch_pfit.enabled) 876 * support). */
877 if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
782 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; 878 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
783 else 879 else
784 temp |= TRANS_DDI_EDP_INPUT_A_ON; 880 temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1139,18 +1235,29 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1139 1235
1140int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) 1236int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
1141{ 1237{
1238 struct drm_device *dev = dev_priv->dev;
1142 uint32_t lcpll = I915_READ(LCPLL_CTL); 1239 uint32_t lcpll = I915_READ(LCPLL_CTL);
1240 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
1143 1241
1144 if (lcpll & LCPLL_CD_SOURCE_FCLK) 1242 if (lcpll & LCPLL_CD_SOURCE_FCLK) {
1145 return 800000; 1243 return 800000;
1146 else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) 1244 } else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) {
1147 return 450000; 1245 return 450000;
1148 else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450) 1246 } else if (freq == LCPLL_CLK_FREQ_450) {
1149 return 450000; 1247 return 450000;
1150 else if (IS_ULT(dev_priv->dev)) 1248 } else if (IS_HASWELL(dev)) {
1151 return 337500; 1249 if (IS_ULT(dev))
1152 else 1250 return 337500;
1153 return 540000; 1251 else
1252 return 540000;
1253 } else {
1254 if (freq == LCPLL_CLK_FREQ_54O_BDW)
1255 return 540000;
1256 else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
1257 return 337500;
1258 else
1259 return 675000;
1260 }
1154} 1261}
1155 1262
1156void intel_ddi_pll_init(struct drm_device *dev) 1263void intel_ddi_pll_init(struct drm_device *dev)
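
The CDCLK readout now branches by platform once the shared fast-path checks miss: Haswell only distinguishes ULT parts, while Broadwell decodes the frequency field further. The same decision tree restated as a plain function (the freq codes are stand-ins for the LCPLL_CLK_FREQ_* register values; only the structure mirrors the hunk):

#include <stdbool.h>
#include <stdio.h>

enum freq_code { FREQ_450, FREQ_540, FREQ_337_5, FREQ_675 };

static int cdclk_khz(bool cd_source_fclk, bool fuse_cdclk_limit,
		     enum freq_code freq, bool is_haswell, bool is_ult)
{
	if (cd_source_fclk)		/* LCPLL_CD_SOURCE_FCLK */
		return 800000;
	if (fuse_cdclk_limit)		/* HSW_FUSE_STRAP & HSW_CDCLK_LIMIT */
		return 450000;
	if (freq == FREQ_450)
		return 450000;
	if (is_haswell)			/* HSW: only ULT differs */
		return is_ult ? 337500 : 540000;
	if (freq == FREQ_540)		/* BDW decodes the field further */
		return 540000;
	if (freq == FREQ_337_5)
		return 337500;
	return 675000;
}

int main(void)
{
	/* Haswell ULT with no overrides reads back 337.5 MHz */
	printf("%d kHz\n", cdclk_khz(false, false, FREQ_540, true, true));
	return 0;
}
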
@@ -1202,7 +1309,7 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
1202 1309
1203 val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST | 1310 val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
1204 DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; 1311 DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
1205 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 1312 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1206 val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; 1313 val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
1207 I915_WRITE(DP_TP_CTL(port), val); 1314 I915_WRITE(DP_TP_CTL(port), val);
1208 POSTING_READ(DP_TP_CTL(port)); 1315 POSTING_READ(DP_TP_CTL(port));
@@ -1285,6 +1392,20 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1285 default: 1392 default:
1286 break; 1393 break;
1287 } 1394 }
1395
1396 switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
1397 case TRANS_DDI_MODE_SELECT_HDMI:
1398 case TRANS_DDI_MODE_SELECT_DVI:
1399 case TRANS_DDI_MODE_SELECT_FDI:
1400 break;
1401 case TRANS_DDI_MODE_SELECT_DP_SST:
1402 case TRANS_DDI_MODE_SELECT_DP_MST:
1403 pipe_config->has_dp_encoder = true;
1404 intel_dp_get_m_n(intel_crtc, pipe_config);
1405 break;
1406 default:
1407 break;
1408 }
1288} 1409}
1289 1410
1290static void intel_ddi_destroy(struct drm_encoder *encoder) 1411static void intel_ddi_destroy(struct drm_encoder *encoder)
@@ -1314,6 +1435,41 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
1314 .destroy = intel_ddi_destroy, 1435 .destroy = intel_ddi_destroy,
1315}; 1436};
1316 1437
1438static struct intel_connector *
1439intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
1440{
1441 struct intel_connector *connector;
1442 enum port port = intel_dig_port->port;
1443
1444 connector = kzalloc(sizeof(*connector), GFP_KERNEL);
1445 if (!connector)
1446 return NULL;
1447
1448 intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
1449 if (!intel_dp_init_connector(intel_dig_port, connector)) {
1450 kfree(connector);
1451 return NULL;
1452 }
1453
1454 return connector;
1455}
1456
1457static struct intel_connector *
1458intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
1459{
1460 struct intel_connector *connector;
1461 enum port port = intel_dig_port->port;
1462
1463 connector = kzalloc(sizeof(*connector), GFP_KERNEL);
1464 if (!connector)
1465 return NULL;
1466
1467 intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
1468 intel_hdmi_init_connector(intel_dig_port, connector);
1469
1470 return connector;
1471}
1472
1317void intel_ddi_init(struct drm_device *dev, enum port port) 1473void intel_ddi_init(struct drm_device *dev, enum port port)
1318{ 1474{
1319 struct drm_i915_private *dev_priv = dev->dev_private; 1475 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1322,17 +1478,22 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1322 struct drm_encoder *encoder; 1478 struct drm_encoder *encoder;
1323 struct intel_connector *hdmi_connector = NULL; 1479 struct intel_connector *hdmi_connector = NULL;
1324 struct intel_connector *dp_connector = NULL; 1480 struct intel_connector *dp_connector = NULL;
1481 bool init_hdmi, init_dp;
1482
1483 init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
1484 dev_priv->vbt.ddi_port_info[port].supports_hdmi);
1485 init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
1486 if (!init_dp && !init_hdmi) {
1487 DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
1488 port_name(port));
1489 init_hdmi = true;
1490 init_dp = true;
1491 }
1325 1492
1326 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 1493 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
1327 if (!intel_dig_port) 1494 if (!intel_dig_port)
1328 return; 1495 return;
1329 1496
1330 dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1331 if (!dp_connector) {
1332 kfree(intel_dig_port);
1333 return;
1334 }
1335
1336 intel_encoder = &intel_dig_port->base; 1497 intel_encoder = &intel_dig_port->base;
1337 encoder = &intel_encoder->base; 1498 encoder = &intel_encoder->base;
1338 1499
@@ -1352,28 +1513,22 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1352 intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & 1513 intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
1353 (DDI_BUF_PORT_REVERSAL | 1514 (DDI_BUF_PORT_REVERSAL |
1354 DDI_A_4_LANES); 1515 DDI_A_4_LANES);
1355 intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
1356 1516
1357 intel_encoder->type = INTEL_OUTPUT_UNKNOWN; 1517 intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
1358 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 1518 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
1359 intel_encoder->cloneable = false; 1519 intel_encoder->cloneable = false;
1360 intel_encoder->hot_plug = intel_ddi_hot_plug; 1520 intel_encoder->hot_plug = intel_ddi_hot_plug;
1361 1521
1362 if (!intel_dp_init_connector(intel_dig_port, dp_connector)) { 1522 if (init_dp)
1363 drm_encoder_cleanup(encoder); 1523 dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
1364 kfree(intel_dig_port);
1365 kfree(dp_connector);
1366 return;
1367 }
1368 1524
1369 if (intel_encoder->type != INTEL_OUTPUT_EDP) { 1525 /* In theory we don't need the encoder->type check, but leave it just in
1370 hdmi_connector = kzalloc(sizeof(struct intel_connector), 1526 * case we have some really bad VBTs... */
1371 GFP_KERNEL); 1527 if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
1372 if (!hdmi_connector) { 1528 hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
1373 return;
1374 }
1375 1529
1376 intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); 1530 if (!dp_connector && !hdmi_connector) {
1377 intel_hdmi_init_connector(intel_dig_port, hdmi_connector); 1531 drm_encoder_cleanup(encoder);
1532 kfree(intel_dig_port);
1378 } 1533 }
1379} 1534}
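The reworked error handling in intel_ddi_init() boils down to: allocate the digital port, try each connector type independently, and tear the encoder down only if neither attached. A minimal sketch of that shape, with invented stand-in types in place of the driver structures:

#include <stdbool.h>
#include <stdlib.h>

struct connector { int type; };	/* stand-in for intel_connector */

static struct connector *try_init_connector(bool supported)
{
	if (!supported)
		return NULL;
	return calloc(1, sizeof(struct connector));	/* kzalloc analogue */
}

int main(void)
{
	bool init_dp = true, init_hdmi = false;		/* as read from VBT */
	struct connector *dp = try_init_connector(init_dp);
	struct connector *hdmi = try_init_connector(init_hdmi);

	if (!dp && !hdmi) {
		/* drm_encoder_cleanup() + kfree(intel_dig_port) here */
		return 1;
	}
	free(dp);
	free(hdmi);
	return 0;
}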
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d78d33f9337d..3cddd508d110 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,14 +41,13 @@
41#include <drm/drm_crtc_helper.h> 41#include <drm/drm_crtc_helper.h>
42#include <linux/dma_remapping.h> 42#include <linux/dma_remapping.h>
43 43
44bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
45static void intel_increase_pllclock(struct drm_crtc *crtc); 44static void intel_increase_pllclock(struct drm_crtc *crtc);
46static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 45static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
47 46
48static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 47static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
49 struct intel_crtc_config *pipe_config); 48 struct intel_crtc_config *pipe_config);
50static void ironlake_crtc_clock_get(struct intel_crtc *crtc, 49static void ironlake_pch_clock_get(struct intel_crtc *crtc,
51 struct intel_crtc_config *pipe_config); 50 struct intel_crtc_config *pipe_config);
52 51
53static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, 52static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
54 int x, int y, struct drm_framebuffer *old_fb); 53 int x, int y, struct drm_framebuffer *old_fb);
@@ -69,9 +68,6 @@ struct intel_limit {
69 intel_p2_t p2; 68 intel_p2_t p2;
70}; 69};
71 70
72/* FDI */
73#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
74
75int 71int
76intel_pch_rawclk(struct drm_device *dev) 72intel_pch_rawclk(struct drm_device *dev)
77{ 73{
@@ -313,44 +309,44 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
313 .p2_slow = 7, .p2_fast = 7 }, 309 .p2_slow = 7, .p2_fast = 7 },
314}; 310};
315 311
316static const intel_limit_t intel_limits_vlv_dac = { 312static const intel_limit_t intel_limits_vlv = {
317 .dot = { .min = 25000, .max = 270000 }, 313 /*
318 .vco = { .min = 4000000, .max = 6000000 }, 314 * These are the data rate limits (measured in fast clocks)
319 .n = { .min = 1, .max = 7 }, 315 * since those are the strictest limits we have. The fast
320 .m = { .min = 22, .max = 450 }, /* guess */ 316 * clock and actual rate limits are more relaxed, so checking
321 .m1 = { .min = 2, .max = 3 }, 317 * them would make no difference.
322 .m2 = { .min = 11, .max = 156 }, 318 */
323 .p = { .min = 10, .max = 30 }, 319 .dot = { .min = 25000 * 5, .max = 270000 * 5 },
324 .p1 = { .min = 1, .max = 3 },
325 .p2 = { .dot_limit = 270000,
326 .p2_slow = 2, .p2_fast = 20 },
327};
328
329static const intel_limit_t intel_limits_vlv_hdmi = {
330 .dot = { .min = 25000, .max = 270000 },
331 .vco = { .min = 4000000, .max = 6000000 }, 320 .vco = { .min = 4000000, .max = 6000000 },
332 .n = { .min = 1, .max = 7 }, 321 .n = { .min = 1, .max = 7 },
333 .m = { .min = 60, .max = 300 }, /* guess */
334 .m1 = { .min = 2, .max = 3 }, 322 .m1 = { .min = 2, .max = 3 },
335 .m2 = { .min = 11, .max = 156 }, 323 .m2 = { .min = 11, .max = 156 },
336 .p = { .min = 10, .max = 30 },
337 .p1 = { .min = 2, .max = 3 }, 324 .p1 = { .min = 2, .max = 3 },
338 .p2 = { .dot_limit = 270000, 325 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
339 .p2_slow = 2, .p2_fast = 20 },
340}; 326};
341 327
342static const intel_limit_t intel_limits_vlv_dp = { 328static void vlv_clock(int refclk, intel_clock_t *clock)
343 .dot = { .min = 25000, .max = 270000 }, 329{
344 .vco = { .min = 4000000, .max = 6000000 }, 330 clock->m = clock->m1 * clock->m2;
345 .n = { .min = 1, .max = 7 }, 331 clock->p = clock->p1 * clock->p2;
346 .m = { .min = 22, .max = 450 }, 332 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
347 .m1 = { .min = 2, .max = 3 }, 333 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
348 .m2 = { .min = 11, .max = 156 }, 334}
349 .p = { .min = 10, .max = 30 }, 335
350 .p1 = { .min = 1, .max = 3 }, 336/**
351 .p2 = { .dot_limit = 270000, 337 * Returns whether any output on the specified pipe is of the specified type
352 .p2_slow = 2, .p2_fast = 20 }, 338 */
353}; 339static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
340{
341 struct drm_device *dev = crtc->dev;
342 struct intel_encoder *encoder;
343
344 for_each_encoder_on_crtc(dev, crtc, encoder)
345 if (encoder->type == type)
346 return true;
347
348 return false;
349}
354 350
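vlv_clock() above, and the reworked pineview/i9xx helpers below, switch from truncating division to DIV_ROUND_CLOSEST(). For non-negative operands the kernel macro reduces to the usual add-half-then-divide trick:

#include <stdio.h>

/* Simplified for non-negative x and positive d; the kernel macro also
 * handles signed operands. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	/* dot = vco / p: truncation can lose a unit of precision */
	printf("%d vs %d\n", DIV_ROUND_CLOSEST(7, 2), 7 / 2);	/* 4 vs 3 */
	return 0;
}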
355static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 351static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
356 int refclk) 352 int refclk)
@@ -412,12 +408,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
412 else 408 else
413 limit = &intel_limits_pineview_sdvo; 409 limit = &intel_limits_pineview_sdvo;
414 } else if (IS_VALLEYVIEW(dev)) { 410 } else if (IS_VALLEYVIEW(dev)) {
415 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) 411 limit = &intel_limits_vlv;
416 limit = &intel_limits_vlv_dac;
417 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
418 limit = &intel_limits_vlv_hdmi;
419 else
420 limit = &intel_limits_vlv_dp;
421 } else if (!IS_GEN2(dev)) { 412 } else if (!IS_GEN2(dev)) {
422 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 413 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
423 limit = &intel_limits_i9xx_lvds; 414 limit = &intel_limits_i9xx_lvds;
@@ -439,8 +430,8 @@ static void pineview_clock(int refclk, intel_clock_t *clock)
439{ 430{
440 clock->m = clock->m2 + 2; 431 clock->m = clock->m2 + 2;
441 clock->p = clock->p1 * clock->p2; 432 clock->p = clock->p1 * clock->p2;
442 clock->vco = refclk * clock->m / clock->n; 433 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
443 clock->dot = clock->vco / clock->p; 434 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
444} 435}
445 436
446static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) 437static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
@@ -452,23 +443,8 @@ static void i9xx_clock(int refclk, intel_clock_t *clock)
452{ 443{
453 clock->m = i9xx_dpll_compute_m(clock); 444 clock->m = i9xx_dpll_compute_m(clock);
454 clock->p = clock->p1 * clock->p2; 445 clock->p = clock->p1 * clock->p2;
455 clock->vco = refclk * clock->m / (clock->n + 2); 446 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
456 clock->dot = clock->vco / clock->p; 447 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
457}
458
459/**
460 * Returns whether any output on the specified pipe is of the specified type
461 */
462bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
463{
464 struct drm_device *dev = crtc->dev;
465 struct intel_encoder *encoder;
466
467 for_each_encoder_on_crtc(dev, crtc, encoder)
468 if (encoder->type == type)
469 return true;
470
471 return false;
472} 448}
473 449
474#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 450#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -481,20 +457,26 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
481 const intel_limit_t *limit, 457 const intel_limit_t *limit,
482 const intel_clock_t *clock) 458 const intel_clock_t *clock)
483{ 459{
460 if (clock->n < limit->n.min || limit->n.max < clock->n)
461 INTELPllInvalid("n out of range\n");
484 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 462 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
485 INTELPllInvalid("p1 out of range\n"); 463 INTELPllInvalid("p1 out of range\n");
486 if (clock->p < limit->p.min || limit->p.max < clock->p)
487 INTELPllInvalid("p out of range\n");
488 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 464 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
489 INTELPllInvalid("m2 out of range\n"); 465 INTELPllInvalid("m2 out of range\n");
490 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 466 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
491 INTELPllInvalid("m1 out of range\n"); 467 INTELPllInvalid("m1 out of range\n");
492 if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) 468
493 INTELPllInvalid("m1 <= m2\n"); 469 if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
494 if (clock->m < limit->m.min || limit->m.max < clock->m) 470 if (clock->m1 <= clock->m2)
495 INTELPllInvalid("m out of range\n"); 471 INTELPllInvalid("m1 <= m2\n");
496 if (clock->n < limit->n.min || limit->n.max < clock->n) 472
497 INTELPllInvalid("n out of range\n"); 473 if (!IS_VALLEYVIEW(dev)) {
474 if (clock->p < limit->p.min || limit->p.max < clock->p)
475 INTELPllInvalid("p out of range\n");
476 if (clock->m < limit->m.min || limit->m.max < clock->m)
477 INTELPllInvalid("m out of range\n");
478 }
479
498 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 480 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
499 INTELPllInvalid("vco out of range\n"); 481 INTELPllInvalid("vco out of range\n");
500 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 482 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
@@ -688,67 +670,73 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
688 int target, int refclk, intel_clock_t *match_clock, 670 int target, int refclk, intel_clock_t *match_clock,
689 intel_clock_t *best_clock) 671 intel_clock_t *best_clock)
690{ 672{
691 u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; 673 struct drm_device *dev = crtc->dev;
692 u32 m, n, fastclk; 674 intel_clock_t clock;
693 u32 updrate, minupdate, p; 675 unsigned int bestppm = 1000000;
694 unsigned long bestppm, ppm, absppm; 676 /* min update 19.2 MHz */
695 int dotclk, flag; 677 int max_n = min(limit->n.max, refclk / 19200);
696 678 bool found = false;
697 flag = 0; 679
698 dotclk = target * 1000; 680 target *= 5; /* fast clock */
699 bestppm = 1000000; 681
700 ppm = absppm = 0; 682 memset(best_clock, 0, sizeof(*best_clock));
701 fastclk = dotclk / (2*100);
702 updrate = 0;
703 minupdate = 19200;
704 n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
705 bestm1 = bestm2 = bestp1 = bestp2 = 0;
706 683
707 /* based on hardware requirement, prefer smaller n to precision */ 684 /* based on hardware requirement, prefer smaller n to precision */
708 for (n = limit->n.min; n <= ((refclk) / minupdate); n++) { 685 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
709 updrate = refclk / n; 686 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
710 for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) { 687 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
711 for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) { 688 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
712 if (p2 > 10) 689 clock.p = clock.p1 * clock.p2;
713 p2 = p2 - 1;
714 p = p1 * p2;
715 /* based on hardware requirement, prefer bigger m1,m2 values */ 690 /* based on hardware requirement, prefer bigger m1,m2 values */
716 for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) { 691 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
717 m2 = (((2*(fastclk * p * n / m1 )) + 692 unsigned int ppm, diff;
718 refclk) / (2*refclk)); 693
719 m = m1 * m2; 694 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
720 vco = updrate * m; 695 refclk * clock.m1);
721 if (vco >= limit->vco.min && vco < limit->vco.max) { 696
722 ppm = 1000000 * ((vco / p) - fastclk) / fastclk; 697 vlv_clock(refclk, &clock);
723 absppm = (ppm > 0) ? ppm : (-ppm); 698
724 if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) { 699 if (!intel_PLL_is_valid(dev, limit,
725 bestppm = 0; 700 &clock))
726 flag = 1; 701 continue;
727 } 702
728 if (absppm < bestppm - 10) { 703 diff = abs(clock.dot - target);
729 bestppm = absppm; 704 ppm = div_u64(1000000ULL * diff, target);
730 flag = 1; 705
731 } 706 if (ppm < 100 && clock.p > best_clock->p) {
732 if (flag) { 707 bestppm = 0;
733 bestn = n; 708 *best_clock = clock;
734 bestm1 = m1; 709 found = true;
735 bestm2 = m2; 710 }
736 bestp1 = p1; 711
737 bestp2 = p2; 712 if (bestppm >= 10 && ppm < bestppm - 10) {
738 flag = 0; 713 bestppm = ppm;
739 } 714 *best_clock = clock;
715 found = true;
740 } 716 }
741 } 717 }
742 } 718 }
743 } 719 }
744 } 720 }
745 best_clock->n = bestn;
746 best_clock->m1 = bestm1;
747 best_clock->m2 = bestm2;
748 best_clock->p1 = bestp1;
749 best_clock->p2 = bestp2;
750 721
751 return true; 722 return found;
723}
724
725bool intel_crtc_active(struct drm_crtc *crtc)
726{
727 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
728
729 /* Be paranoid as we can arrive here with only partial
730 * state retrieved from the hardware during setup.
731 *
732 * We can ditch the adjusted_mode.crtc_clock check as soon
733 * as Haswell has gained clock readout/fastboot support.
734 *
735 * We can ditch the crtc->fb check as soon as we can
736 * properly reconstruct framebuffers.
737 */
738 return intel_crtc->active && crtc->fb &&
739 intel_crtc->config.adjusted_mode.crtc_clock;
752} 740}
753 741
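The rewritten vlv_find_best_dpll() is a brute-force divider search: iterate n, p1, p2 and m1, derive m2 from the target fast clock, validate the candidate, and keep the one with the lowest ppm error. A compact user-space model of that loop, with the limits hard-coded from intel_limits_vlv and the driver's two-tier "prefer larger p below 100 ppm" rule collapsed into a plain minimum:

#include <stdio.h>
#include <stdlib.h>

struct clk { int n, m1, m2, p1, p2, dot; };

static long div_round_closest(long a, long b) { return (a + b / 2) / b; }

int main(void)
{
	const int refclk = 100000;		/* kHz */
	const int target = 162000 * 5;		/* fast clock = 5x data rate */
	unsigned int bestppm = 1000000;
	struct clk best = { 0 };

	for (int n = 1; n <= refclk / 19200; n++)  /* update rate >= 19.2 MHz */
		for (int p1 = 3; p1 >= 2; p1--)
			for (int p2 = 20; p2 >= 2; p2 -= (p2 > 10 ? 2 : 1))
				for (int m1 = 2; m1 <= 3; m1++) {
					int p = p1 * p2;
					int m2 = (int)div_round_closest(
						(long)target * p * n,
						(long)refclk * m1);
					long vco = div_round_closest(
						(long)refclk * m1 * m2, n);
					int dot = (int)div_round_closest(vco, p);
					unsigned int ppm;

					if (m2 < 11 || m2 > 156 ||
					    vco < 4000000 || vco > 6000000)
						continue;
					ppm = (unsigned int)(1000000LL *
						abs(dot - target) / target);
					if (ppm < bestppm) {
						bestppm = ppm;
						best = (struct clk){ n, m1, m2,
								     p1, p2, dot };
					}
				}

	printf("n=%d m1=%d m2=%d p1=%d p2=%d dot=%d (%u ppm)\n",
	       best.n, best.m1, best.m2, best.p1, best.p2, best.dot, bestppm);
	return 0;
}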
754enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 742enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -812,6 +800,25 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
812 DRM_DEBUG_KMS("vblank wait timed out\n"); 800 DRM_DEBUG_KMS("vblank wait timed out\n");
813} 801}
814 802
803static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
804{
805 struct drm_i915_private *dev_priv = dev->dev_private;
806 u32 reg = PIPEDSL(pipe);
807 u32 line1, line2;
808 u32 line_mask;
809
810 if (IS_GEN2(dev))
811 line_mask = DSL_LINEMASK_GEN2;
812 else
813 line_mask = DSL_LINEMASK_GEN3;
814
815 line1 = I915_READ(reg) & line_mask;
816 mdelay(5);
817 line2 = I915_READ(reg) & line_mask;
818
819 return line1 == line2;
820}
821
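pipe_dsl_stopped() above decides the pipe has stopped when two scanline-counter samples taken 5 ms apart compare equal. The same sample-twice idiom in plain user-space C, with read_dsl() standing in for the PIPEDSL register read:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for I915_READ(PIPEDSL(pipe)); a stopped counter here. */
static unsigned int read_dsl(void)
{
	return 0x1a4;
}

static bool dsl_stopped(unsigned int line_mask)
{
	unsigned int line1 = read_dsl() & line_mask;

	usleep(5000);			/* mdelay(5) in the driver */
	return (read_dsl() & line_mask) == line1;
}

int main(void)
{
	printf("stopped: %d\n", dsl_stopped(0xfff));
	return 0;
}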
815/* 822/*
816 * intel_wait_for_pipe_off - wait for pipe to turn off 823 * intel_wait_for_pipe_off - wait for pipe to turn off
817 * @dev: drm device 824 * @dev: drm device
@@ -843,22 +850,8 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
843 100)) 850 100))
844 WARN(1, "pipe_off wait timed out\n"); 851 WARN(1, "pipe_off wait timed out\n");
845 } else { 852 } else {
846 u32 last_line, line_mask;
847 int reg = PIPEDSL(pipe);
848 unsigned long timeout = jiffies + msecs_to_jiffies(100);
849
850 if (IS_GEN2(dev))
851 line_mask = DSL_LINEMASK_GEN2;
852 else
853 line_mask = DSL_LINEMASK_GEN3;
854
855 /* Wait for the display line to settle */ 853 /* Wait for the display line to settle */
856 do { 854 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
857 last_line = I915_READ(reg) & line_mask;
858 mdelay(5);
859 } while (((I915_READ(reg) & line_mask) != last_line) &&
860 time_after(timeout, jiffies));
861 if (time_after(jiffies, timeout))
862 WARN(1, "pipe_off wait timed out\n"); 855 WARN(1, "pipe_off wait timed out\n");
863 } 856 }
864} 857}
@@ -929,6 +922,24 @@ void assert_pll(struct drm_i915_private *dev_priv,
929 state_string(state), state_string(cur_state)); 922 state_string(state), state_string(cur_state));
930} 923}
931 924
925/* XXX: the dsi pll is shared between MIPI DSI ports */
926static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
927{
928 u32 val;
929 bool cur_state;
930
931 mutex_lock(&dev_priv->dpio_lock);
932 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
933 mutex_unlock(&dev_priv->dpio_lock);
934
935 cur_state = val & DSI_PLL_VCO_EN;
936 WARN(cur_state != state,
937 "DSI PLL state assertion failure (expected %s, current %s)\n",
938 state_string(state), state_string(cur_state));
939}
940#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
941#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
942
932struct intel_shared_dpll * 943struct intel_shared_dpll *
933intel_crtc_to_shared_dpll(struct intel_crtc *crtc) 944intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
934{ 945{
@@ -1069,6 +1080,26 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1069 pipe_name(pipe)); 1080 pipe_name(pipe));
1070} 1081}
1071 1082
1083static void assert_cursor(struct drm_i915_private *dev_priv,
1084 enum pipe pipe, bool state)
1085{
1086 struct drm_device *dev = dev_priv->dev;
1087 bool cur_state;
1088
1089 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1090 cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
1091 else if (IS_845G(dev) || IS_I865G(dev))
1092 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1093 else
1094 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1095
1096 WARN(cur_state != state,
1097 "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1098 pipe_name(pipe), state_string(state), state_string(cur_state));
1099}
1100#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1101#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1102
1072void assert_pipe(struct drm_i915_private *dev_priv, 1103void assert_pipe(struct drm_i915_private *dev_priv,
1073 enum pipe pipe, bool state) 1104 enum pipe pipe, bool state)
1074{ 1105{
@@ -1323,6 +1354,26 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1323 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1354 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1324} 1355}
1325 1356
1357static void intel_init_dpio(struct drm_device *dev)
1358{
1359 struct drm_i915_private *dev_priv = dev->dev_private;
1360
1361 if (!IS_VALLEYVIEW(dev))
1362 return;
1363
1364 /*
1365 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1366 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
1367 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
1368 * b. The other bits such as sfr settings / modesel may all be set
1369 * to 0.
1370 *
1371 * This should only be done on init and resume from S3 with both
1372 * PLLs disabled, or we risk losing DPIO and PLL synchronization.
1373 */
1374 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1375}
1376
1326static void vlv_enable_pll(struct intel_crtc *crtc) 1377static void vlv_enable_pll(struct intel_crtc *crtc)
1327{ 1378{
1328 struct drm_device *dev = crtc->base.dev; 1379 struct drm_device *dev = crtc->base.dev;
@@ -1429,6 +1480,20 @@ static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1429 POSTING_READ(DPLL(pipe)); 1480 POSTING_READ(DPLL(pipe));
1430} 1481}
1431 1482
1483static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1484{
1485 u32 val = 0;
1486
1487 /* Make sure the pipe isn't still relying on us */
1488 assert_pipe_disabled(dev_priv, pipe);
1489
1490 /* Leave integrated clock source enabled */
1491 if (pipe == PIPE_B)
1492 val = DPLL_INTEGRATED_CRI_CLK_VLV;
1493 I915_WRITE(DPLL(pipe), val);
1494 POSTING_READ(DPLL(pipe));
1495}
1496
1432void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port) 1497void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
1433{ 1498{
1434 u32 port_mask; 1499 u32 port_mask;
@@ -1661,7 +1726,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1661 * returning. 1726 * returning.
1662 */ 1727 */
1663static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 1728static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1664 bool pch_port) 1729 bool pch_port, bool dsi)
1665{ 1730{
1666 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1731 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1667 pipe); 1732 pipe);
@@ -1670,6 +1735,7 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1670 u32 val; 1735 u32 val;
1671 1736
1672 assert_planes_disabled(dev_priv, pipe); 1737 assert_planes_disabled(dev_priv, pipe);
1738 assert_cursor_disabled(dev_priv, pipe);
1673 assert_sprites_disabled(dev_priv, pipe); 1739 assert_sprites_disabled(dev_priv, pipe);
1674 1740
1675 if (HAS_PCH_LPT(dev_priv->dev)) 1741 if (HAS_PCH_LPT(dev_priv->dev))
@@ -1683,7 +1749,10 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1683 * need the check. 1749 * need the check.
1684 */ 1750 */
1685 if (!HAS_PCH_SPLIT(dev_priv->dev)) 1751 if (!HAS_PCH_SPLIT(dev_priv->dev))
1686 assert_pll_enabled(dev_priv, pipe); 1752 if (dsi)
1753 assert_dsi_pll_enabled(dev_priv);
1754 else
1755 assert_pll_enabled(dev_priv, pipe);
1687 else { 1756 else {
1688 if (pch_port) { 1757 if (pch_port) {
1689 /* if driving the PCH, we need FDI enabled */ 1758 /* if driving the PCH, we need FDI enabled */
@@ -1728,6 +1797,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1728 * or we might hang the display. 1797 * or we might hang the display.
1729 */ 1798 */
1730 assert_planes_disabled(dev_priv, pipe); 1799 assert_planes_disabled(dev_priv, pipe);
1800 assert_cursor_disabled(dev_priv, pipe);
1731 assert_sprites_disabled(dev_priv, pipe); 1801 assert_sprites_disabled(dev_priv, pipe);
1732 1802
1733 /* Don't disable pipe A or pipe A PLLs if needed */ 1803 /* Don't disable pipe A or pipe A PLLs if needed */
@@ -1747,63 +1817,75 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1747 * Plane regs are double buffered, going from enabled->disabled needs a 1817 * Plane regs are double buffered, going from enabled->disabled needs a
1748 * trigger in order to latch. The display address reg provides this. 1818 * trigger in order to latch. The display address reg provides this.
1749 */ 1819 */
1750void intel_flush_display_plane(struct drm_i915_private *dev_priv, 1820void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
1751 enum plane plane) 1821 enum plane plane)
1752{ 1822{
1753 if (dev_priv->info->gen >= 4) 1823 u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
1754 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); 1824
1755 else 1825 I915_WRITE(reg, I915_READ(reg));
1756 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); 1826 POSTING_READ(reg);
1757} 1827}
1758 1828
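intel_flush_primary_plane() relies on the plane registers being double buffered: writing the surface address register back to itself, value unchanged, is what latches the pending state. The idiom, modeled with an ordinary volatile variable standing in for MMIO:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_dspsurf;	/* stand-in for DSPSURF/DSPADDR */

static void flush_plane(volatile uint32_t *reg)
{
	*reg = *reg;	/* same value; the write itself is the trigger */
	(void)*reg;	/* POSTING_READ() analogue to flush the write */
}

int main(void)
{
	fake_dspsurf = 0x00100000;
	flush_plane(&fake_dspsurf);
	printf("0x%08x\n", (unsigned int)fake_dspsurf);
	return 0;
}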
1759/** 1829/**
1760 * intel_enable_plane - enable a display plane on a given pipe 1830 * intel_enable_primary_plane - enable the primary plane on a given pipe
1761 * @dev_priv: i915 private structure 1831 * @dev_priv: i915 private structure
1762 * @plane: plane to enable 1832 * @plane: plane to enable
1763 * @pipe: pipe being fed 1833 * @pipe: pipe being fed
1764 * 1834 *
1765 * Enable @plane on @pipe, making sure that @pipe is running first. 1835 * Enable @plane on @pipe, making sure that @pipe is running first.
1766 */ 1836 */
1767static void intel_enable_plane(struct drm_i915_private *dev_priv, 1837static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
1768 enum plane plane, enum pipe pipe) 1838 enum plane plane, enum pipe pipe)
1769{ 1839{
1840 struct intel_crtc *intel_crtc =
1841 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1770 int reg; 1842 int reg;
1771 u32 val; 1843 u32 val;
1772 1844
1773 /* If the pipe isn't enabled, we can't pump pixels and may hang */ 1845 /* If the pipe isn't enabled, we can't pump pixels and may hang */
1774 assert_pipe_enabled(dev_priv, pipe); 1846 assert_pipe_enabled(dev_priv, pipe);
1775 1847
1848 WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
1849
1850 intel_crtc->primary_enabled = true;
1851
1776 reg = DSPCNTR(plane); 1852 reg = DSPCNTR(plane);
1777 val = I915_READ(reg); 1853 val = I915_READ(reg);
1778 if (val & DISPLAY_PLANE_ENABLE) 1854 if (val & DISPLAY_PLANE_ENABLE)
1779 return; 1855 return;
1780 1856
1781 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); 1857 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1782 intel_flush_display_plane(dev_priv, plane); 1858 intel_flush_primary_plane(dev_priv, plane);
1783 intel_wait_for_vblank(dev_priv->dev, pipe); 1859 intel_wait_for_vblank(dev_priv->dev, pipe);
1784} 1860}
1785 1861
1786/** 1862/**
1787 * intel_disable_plane - disable a display plane 1863 * intel_disable_primary_plane - disable the primary plane
1788 * @dev_priv: i915 private structure 1864 * @dev_priv: i915 private structure
1789 * @plane: plane to disable 1865 * @plane: plane to disable
1790 * @pipe: pipe consuming the data 1866 * @pipe: pipe consuming the data
1791 * 1867 *
1792 * Disable @plane; should be an independent operation. 1868 * Disable @plane; should be an independent operation.
1793 */ 1869 */
1794static void intel_disable_plane(struct drm_i915_private *dev_priv, 1870static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
1795 enum plane plane, enum pipe pipe) 1871 enum plane plane, enum pipe pipe)
1796{ 1872{
1873 struct intel_crtc *intel_crtc =
1874 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1797 int reg; 1875 int reg;
1798 u32 val; 1876 u32 val;
1799 1877
1878 WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
1879
1880 intel_crtc->primary_enabled = false;
1881
1800 reg = DSPCNTR(plane); 1882 reg = DSPCNTR(plane);
1801 val = I915_READ(reg); 1883 val = I915_READ(reg);
1802 if ((val & DISPLAY_PLANE_ENABLE) == 0) 1884 if ((val & DISPLAY_PLANE_ENABLE) == 0)
1803 return; 1885 return;
1804 1886
1805 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); 1887 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1806 intel_flush_display_plane(dev_priv, plane); 1888 intel_flush_primary_plane(dev_priv, plane);
1807 intel_wait_for_vblank(dev_priv->dev, pipe); 1889 intel_wait_for_vblank(dev_priv->dev, pipe);
1808} 1890}
1809 1891
@@ -1839,10 +1921,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1839 alignment = 0; 1921 alignment = 0;
1840 break; 1922 break;
1841 case I915_TILING_Y: 1923 case I915_TILING_Y:
1842 /* Despite that we check this in framebuffer_init userspace can 1924 WARN(1, "Y tiled bo slipped through, driver bug!\n");
1843 * screw us over and change the tiling after the fact. Only
1844 * pinned buffers can't change their tiling. */
1845 DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
1846 return -EINVAL; 1925 return -EINVAL;
1847 default: 1926 default:
1848 BUG(); 1927 BUG();
@@ -2077,7 +2156,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2077 else 2156 else
2078 dspcntr &= ~DISPPLANE_TILED; 2157 dspcntr &= ~DISPPLANE_TILED;
2079 2158
2080 if (IS_HASWELL(dev)) 2159 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2081 dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE; 2160 dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
2082 else 2161 else
2083 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2162 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -2097,7 +2176,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2097 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2176 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2098 I915_MODIFY_DISPBASE(DSPSURF(plane), 2177 I915_MODIFY_DISPBASE(DSPSURF(plane),
2099 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); 2178 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2100 if (IS_HASWELL(dev)) { 2179 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2101 I915_WRITE(DSPOFFSET(plane), (y << 16) | x); 2180 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2102 } else { 2181 } else {
2103 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2182 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -2244,11 +2323,26 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2244 return ret; 2323 return ret;
2245 } 2324 }
2246 2325
2247 /* Update pipe size and adjust fitter if needed */ 2326 /*
2327 * Update pipe size and adjust fitter if needed: the reason for this is
2328 * that in compute_mode_changes we check the native mode (not the pfit
2329 * mode) to see if we can flip rather than do a full mode set. In the
2330 * fastboot case, we'll flip, but if we don't update the pipesrc and
 2331 * pfit state, we'll end up with a big fb scanned out into the
 2332 * wrong-sized surface.
2333 *
2334 * To fix this properly, we need to hoist the checks up into
2335 * compute_mode_changes (or above), check the actual pfit state and
2336 * whether the platform allows pfit disable with pipe active, and only
2337 * then update the pipesrc and pfit state, even on the flip path.
2338 */
2248 if (i915_fastboot) { 2339 if (i915_fastboot) {
2340 const struct drm_display_mode *adjusted_mode =
2341 &intel_crtc->config.adjusted_mode;
2342
2249 I915_WRITE(PIPESRC(intel_crtc->pipe), 2343 I915_WRITE(PIPESRC(intel_crtc->pipe),
2250 ((crtc->mode.hdisplay - 1) << 16) | 2344 ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2251 (crtc->mode.vdisplay - 1)); 2345 (adjusted_mode->crtc_vdisplay - 1));
2252 if (!intel_crtc->config.pch_pfit.enabled && 2346 if (!intel_crtc->config.pch_pfit.enabled &&
2253 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || 2347 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2254 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 2348 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
@@ -2873,6 +2967,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2873{ 2967{
2874 struct drm_device *dev = crtc->dev; 2968 struct drm_device *dev = crtc->dev;
2875 struct drm_i915_private *dev_priv = dev->dev_private; 2969 struct drm_i915_private *dev_priv = dev->dev_private;
2970 int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
2876 u32 divsel, phaseinc, auxdiv, phasedir = 0; 2971 u32 divsel, phaseinc, auxdiv, phasedir = 0;
2877 u32 temp; 2972 u32 temp;
2878 2973
@@ -2890,14 +2985,14 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2890 SBI_ICLK); 2985 SBI_ICLK);
2891 2986
2892 /* 20MHz is a corner case which is out of range for the 7-bit divisor */ 2987 /* 20MHz is a corner case which is out of range for the 7-bit divisor */
2893 if (crtc->mode.clock == 20000) { 2988 if (clock == 20000) {
2894 auxdiv = 1; 2989 auxdiv = 1;
2895 divsel = 0x41; 2990 divsel = 0x41;
2896 phaseinc = 0x20; 2991 phaseinc = 0x20;
2897 } else { 2992 } else {
2898 /* The iCLK virtual clock root frequency is in MHz, 2993 /* The iCLK virtual clock root frequency is in MHz,
 2899 * but the crtc->mode.clock is in KHz. To get the 2994 * but the adjusted_mode->crtc_clock is in KHz. To get the
2900 * it is necessary to divide one by another, so we 2995 * divisors, it is necessary to divide one by another, so we
2901 * convert the virtual clock precision to KHz here for higher 2996 * convert the virtual clock precision to KHz here for higher
2902 * precision. 2997 * precision.
2903 */ 2998 */
@@ -2905,7 +3000,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2905 u32 iclk_pi_range = 64; 3000 u32 iclk_pi_range = 64;
2906 u32 desired_divisor, msb_divisor_value, pi_value; 3001 u32 desired_divisor, msb_divisor_value, pi_value;
2907 3002
2908 desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock); 3003 desired_divisor = (iclk_virtual_root_freq / clock);
2909 msb_divisor_value = desired_divisor / iclk_pi_range; 3004 msb_divisor_value = desired_divisor / iclk_pi_range;
2910 pi_value = desired_divisor % iclk_pi_range; 3005 pi_value = desired_divisor % iclk_pi_range;
2911 3006
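The divisor split above is straightforward integer arithmetic: the 172.8 MHz virtual root frequency is divided by the dot clock, then split into a coarse divisor and a 64-step phase increment. A worked example, run in user space; the 108000 kHz dot clock is arbitrary:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t iclk_virtual_root_freq = 172800 * 1000;	/* kHz */
	uint32_t iclk_pi_range = 64;
	uint32_t clock = 108000;				/* kHz, example */

	uint32_t desired_divisor = iclk_virtual_root_freq / clock;
	uint32_t msb_divisor_value = desired_divisor / iclk_pi_range;
	uint32_t pi_value = desired_divisor % iclk_pi_range;

	/* prints: divisor=1600 msb=25 pi=0 */
	printf("divisor=%u msb=%u pi=%u\n",
	       desired_divisor, msb_divisor_value, pi_value);
	return 0;
}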
@@ -2921,7 +3016,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2921 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 3016 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2922 3017
2923 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 3018 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2924 crtc->mode.clock, 3019 clock,
2925 auxdiv, 3020 auxdiv,
2926 divsel, 3021 divsel,
2927 phasedir, 3022 phasedir,
@@ -3286,6 +3381,108 @@ static void intel_disable_planes(struct drm_crtc *crtc)
3286 intel_plane_disable(&intel_plane->base); 3381 intel_plane_disable(&intel_plane->base);
3287} 3382}
3288 3383
3384void hsw_enable_ips(struct intel_crtc *crtc)
3385{
3386 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3387
3388 if (!crtc->config.ips_enabled)
3389 return;
3390
3391 /* We can only enable IPS after we enable a plane and wait for a vblank.
3392 * We guarantee that the plane is enabled by calling intel_enable_ips
3393 * only after intel_enable_plane. And intel_enable_plane already waits
3394 * for a vblank, so all we need to do here is to enable the IPS bit. */
3395 assert_plane_enabled(dev_priv, crtc->plane);
3396 if (IS_BROADWELL(crtc->base.dev)) {
3397 mutex_lock(&dev_priv->rps.hw_lock);
3398 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
3399 mutex_unlock(&dev_priv->rps.hw_lock);
 3400 /* Quoting Art Runyan: "it's not safe to expect any particular
3401 * value in IPS_CTL bit 31 after enabling IPS through the
3402 * mailbox." Therefore we need to defer waiting on the state
3403 * change.
3404 * TODO: need to fix this for state checker
3405 */
3406 } else {
3407 I915_WRITE(IPS_CTL, IPS_ENABLE);
3408 /* The bit only becomes 1 in the next vblank, so this wait here
3409 * is essentially intel_wait_for_vblank. If we don't have this
3410 * and don't wait for vblanks until the end of crtc_enable, then
3411 * the HW state readout code will complain that the expected
3412 * IPS_CTL value is not the one we read. */
3413 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
3414 DRM_ERROR("Timed out waiting for IPS enable\n");
3415 }
3416}
3417
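The non-Broadwell branch above leans on wait_for(): poll a condition until it holds or a timeout expires. A rough user-space equivalent, with a toy condition standing in for the IPS_CTL readback:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool ips_enabled(void)
{
	static int polls;
	return ++polls > 3;	/* toy condition: true on the 4th poll */
}

static int wait_for(bool (*cond)(void), long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (!cond()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return -1;	/* -ETIMEDOUT in the kernel */
		usleep(1000);
	}
	return 0;
}

int main(void)
{
	printf("%s\n", wait_for(ips_enabled, 50) ? "timed out" : "enabled");
	return 0;
}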
3418void hsw_disable_ips(struct intel_crtc *crtc)
3419{
3420 struct drm_device *dev = crtc->base.dev;
3421 struct drm_i915_private *dev_priv = dev->dev_private;
3422
3423 if (!crtc->config.ips_enabled)
3424 return;
3425
3426 assert_plane_enabled(dev_priv, crtc->plane);
3427 if (IS_BROADWELL(crtc->base.dev)) {
3428 mutex_lock(&dev_priv->rps.hw_lock);
3429 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3430 mutex_unlock(&dev_priv->rps.hw_lock);
3431 } else
3432 I915_WRITE(IPS_CTL, 0);
3433 POSTING_READ(IPS_CTL);
3434
3435 /* We need to wait for a vblank before we can disable the plane. */
3436 intel_wait_for_vblank(dev, crtc->pipe);
3437}
3438
3439/** Loads the palette/gamma unit for the CRTC with the prepared values */
3440static void intel_crtc_load_lut(struct drm_crtc *crtc)
3441{
3442 struct drm_device *dev = crtc->dev;
3443 struct drm_i915_private *dev_priv = dev->dev_private;
3444 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3445 enum pipe pipe = intel_crtc->pipe;
3446 int palreg = PALETTE(pipe);
3447 int i;
3448 bool reenable_ips = false;
3449
3450 /* The clocks have to be on to load the palette. */
3451 if (!crtc->enabled || !intel_crtc->active)
3452 return;
3453
3454 if (!HAS_PCH_SPLIT(dev_priv->dev)) {
3455 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3456 assert_dsi_pll_enabled(dev_priv);
3457 else
3458 assert_pll_enabled(dev_priv, pipe);
3459 }
3460
3461 /* use legacy palette for Ironlake */
3462 if (HAS_PCH_SPLIT(dev))
3463 palreg = LGC_PALETTE(pipe);
3464
 3465 /* Workaround: Do not read or write the pipe palette/gamma data while
3466 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3467 */
3468 if (intel_crtc->config.ips_enabled &&
3469 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
3470 GAMMA_MODE_MODE_SPLIT)) {
3471 hsw_disable_ips(intel_crtc);
3472 reenable_ips = true;
3473 }
3474
3475 for (i = 0; i < 256; i++) {
3476 I915_WRITE(palreg + 4 * i,
3477 (intel_crtc->lut_r[i] << 16) |
3478 (intel_crtc->lut_g[i] << 8) |
3479 intel_crtc->lut_b[i]);
3480 }
3481
3482 if (reenable_ips)
3483 hsw_enable_ips(intel_crtc);
3484}
3485
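The LUT write loop above packs one 8-bit value per channel into a single 32-bit palette register per entry. The packing in isolation:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_palette(uint8_t r, uint8_t g, uint8_t b)
{
	return ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
}

int main(void)
{
	/* identity-gamma entry 128 -> 0x808080 */
	printf("0x%06x\n", pack_palette(128, 128, 128));
	return 0;
}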
3289static void ironlake_crtc_enable(struct drm_crtc *crtc) 3486static void ironlake_crtc_enable(struct drm_crtc *crtc)
3290{ 3487{
3291 struct drm_device *dev = crtc->dev; 3488 struct drm_device *dev = crtc->dev;
@@ -3305,8 +3502,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3305 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 3502 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3306 intel_set_pch_fifo_underrun_reporting(dev, pipe, true); 3503 intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3307 3504
3308 intel_update_watermarks(dev);
3309
3310 for_each_encoder_on_crtc(dev, crtc, encoder) 3505 for_each_encoder_on_crtc(dev, crtc, encoder)
3311 if (encoder->pre_enable) 3506 if (encoder->pre_enable)
3312 encoder->pre_enable(encoder); 3507 encoder->pre_enable(encoder);
@@ -3329,9 +3524,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3329 */ 3524 */
3330 intel_crtc_load_lut(crtc); 3525 intel_crtc_load_lut(crtc);
3331 3526
3527 intel_update_watermarks(crtc);
3332 intel_enable_pipe(dev_priv, pipe, 3528 intel_enable_pipe(dev_priv, pipe,
3333 intel_crtc->config.has_pch_encoder); 3529 intel_crtc->config.has_pch_encoder, false);
3334 intel_enable_plane(dev_priv, plane, pipe); 3530 intel_enable_primary_plane(dev_priv, plane, pipe);
3335 intel_enable_planes(crtc); 3531 intel_enable_planes(crtc);
3336 intel_crtc_update_cursor(crtc, true); 3532 intel_crtc_update_cursor(crtc, true);
3337 3533
@@ -3365,34 +3561,74 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3365 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A; 3561 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
3366} 3562}
3367 3563
3368static void hsw_enable_ips(struct intel_crtc *crtc) 3564static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
3369{ 3565{
3370 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 3566 struct drm_device *dev = crtc->dev;
3567 struct drm_i915_private *dev_priv = dev->dev_private;
3568 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3569 int pipe = intel_crtc->pipe;
3570 int plane = intel_crtc->plane;
3371 3571
3372 if (!crtc->config.ips_enabled) 3572 intel_enable_primary_plane(dev_priv, plane, pipe);
3373 return; 3573 intel_enable_planes(crtc);
3574 intel_crtc_update_cursor(crtc, true);
3374 3575
3375 /* We can only enable IPS after we enable a plane and wait for a vblank. 3576 hsw_enable_ips(intel_crtc);
3376 * We guarantee that the plane is enabled by calling intel_enable_ips 3577
3377 * only after intel_enable_plane. And intel_enable_plane already waits 3578 mutex_lock(&dev->struct_mutex);
3378 * for a vblank, so all we need to do here is to enable the IPS bit. */ 3579 intel_update_fbc(dev);
3379 assert_plane_enabled(dev_priv, crtc->plane); 3580 mutex_unlock(&dev->struct_mutex);
3380 I915_WRITE(IPS_CTL, IPS_ENABLE);
3381} 3581}
3382 3582
3383static void hsw_disable_ips(struct intel_crtc *crtc) 3583static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
3384{ 3584{
3385 struct drm_device *dev = crtc->base.dev; 3585 struct drm_device *dev = crtc->dev;
3386 struct drm_i915_private *dev_priv = dev->dev_private; 3586 struct drm_i915_private *dev_priv = dev->dev_private;
3587 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3588 int pipe = intel_crtc->pipe;
3589 int plane = intel_crtc->plane;
3387 3590
3388 if (!crtc->config.ips_enabled) 3591 intel_crtc_wait_for_pending_flips(crtc);
3389 return; 3592 drm_vblank_off(dev, pipe);
3390 3593
3391 assert_plane_enabled(dev_priv, crtc->plane); 3594 /* FBC must be disabled before disabling the plane on HSW. */
3392 I915_WRITE(IPS_CTL, 0); 3595 if (dev_priv->fbc.plane == plane)
3596 intel_disable_fbc(dev);
3393 3597
3394 /* We need to wait for a vblank before we can disable the plane. */ 3598 hsw_disable_ips(intel_crtc);
3395 intel_wait_for_vblank(dev, crtc->pipe); 3599
3600 intel_crtc_update_cursor(crtc, false);
3601 intel_disable_planes(crtc);
3602 intel_disable_primary_plane(dev_priv, plane, pipe);
3603}
3604
3605/*
3606 * This implements the workaround described in the "notes" section of the mode
3607 * set sequence documentation. When going from no pipes or single pipe to
3608 * multiple pipes, and planes are enabled after the pipe, we need to wait at
3609 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
3610 */
3611static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
3612{
3613 struct drm_device *dev = crtc->base.dev;
3614 struct intel_crtc *crtc_it, *other_active_crtc = NULL;
3615
3616 /* We want to get the other_active_crtc only if there's only 1 other
3617 * active crtc. */
3618 list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
3619 if (!crtc_it->active || crtc_it == crtc)
3620 continue;
3621
3622 if (other_active_crtc)
3623 return;
3624
3625 other_active_crtc = crtc_it;
3626 }
3627 if (!other_active_crtc)
3628 return;
3629
3630 intel_wait_for_vblank(dev, other_active_crtc->pipe);
3631 intel_wait_for_vblank(dev, other_active_crtc->pipe);
3396} 3632}
3397 3633
3398static void haswell_crtc_enable(struct drm_crtc *crtc) 3634static void haswell_crtc_enable(struct drm_crtc *crtc)
@@ -3402,7 +3638,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3402 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3638 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3403 struct intel_encoder *encoder; 3639 struct intel_encoder *encoder;
3404 int pipe = intel_crtc->pipe; 3640 int pipe = intel_crtc->pipe;
3405 int plane = intel_crtc->plane;
3406 3641
3407 WARN_ON(!crtc->enabled); 3642 WARN_ON(!crtc->enabled);
3408 3643
@@ -3415,8 +3650,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3415 if (intel_crtc->config.has_pch_encoder) 3650 if (intel_crtc->config.has_pch_encoder)
3416 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); 3651 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3417 3652
3418 intel_update_watermarks(dev);
3419
3420 if (intel_crtc->config.has_pch_encoder) 3653 if (intel_crtc->config.has_pch_encoder)
3421 dev_priv->display.fdi_link_train(crtc); 3654 dev_priv->display.fdi_link_train(crtc);
3422 3655
@@ -3437,23 +3670,22 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3437 intel_ddi_set_pipe_settings(crtc); 3670 intel_ddi_set_pipe_settings(crtc);
3438 intel_ddi_enable_transcoder_func(crtc); 3671 intel_ddi_enable_transcoder_func(crtc);
3439 3672
3673 intel_update_watermarks(crtc);
3440 intel_enable_pipe(dev_priv, pipe, 3674 intel_enable_pipe(dev_priv, pipe,
3441 intel_crtc->config.has_pch_encoder); 3675 intel_crtc->config.has_pch_encoder, false);
3442 intel_enable_plane(dev_priv, plane, pipe);
3443 intel_enable_planes(crtc);
3444 intel_crtc_update_cursor(crtc, true);
3445
3446 hsw_enable_ips(intel_crtc);
3447 3676
3448 if (intel_crtc->config.has_pch_encoder) 3677 if (intel_crtc->config.has_pch_encoder)
3449 lpt_pch_enable(crtc); 3678 lpt_pch_enable(crtc);
3450 3679
3451 mutex_lock(&dev->struct_mutex); 3680 for_each_encoder_on_crtc(dev, crtc, encoder) {
3452 intel_update_fbc(dev);
3453 mutex_unlock(&dev->struct_mutex);
3454
3455 for_each_encoder_on_crtc(dev, crtc, encoder)
3456 encoder->enable(encoder); 3681 encoder->enable(encoder);
3682 intel_opregion_notify_encoder(encoder, true);
3683 }
3684
3685 /* If we change the relative order between pipe/planes enabling, we need
3686 * to change the workaround. */
3687 haswell_mode_set_planes_workaround(intel_crtc);
3688 haswell_crtc_enable_planes(crtc);
3457 3689
3458 /* 3690 /*
3459 * There seems to be a race in PCH platform hw (at least on some 3691 * There seems to be a race in PCH platform hw (at least on some
@@ -3506,7 +3738,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3506 3738
3507 intel_crtc_update_cursor(crtc, false); 3739 intel_crtc_update_cursor(crtc, false);
3508 intel_disable_planes(crtc); 3740 intel_disable_planes(crtc);
3509 intel_disable_plane(dev_priv, plane, pipe); 3741 intel_disable_primary_plane(dev_priv, plane, pipe);
3510 3742
3511 if (intel_crtc->config.has_pch_encoder) 3743 if (intel_crtc->config.has_pch_encoder)
3512 intel_set_pch_fifo_underrun_reporting(dev, pipe, false); 3744 intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
@@ -3547,7 +3779,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3547 } 3779 }
3548 3780
3549 intel_crtc->active = false; 3781 intel_crtc->active = false;
3550 intel_update_watermarks(dev); 3782 intel_update_watermarks(crtc);
3551 3783
3552 mutex_lock(&dev->struct_mutex); 3784 mutex_lock(&dev->struct_mutex);
3553 intel_update_fbc(dev); 3785 intel_update_fbc(dev);
@@ -3561,27 +3793,17 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3561 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3793 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3562 struct intel_encoder *encoder; 3794 struct intel_encoder *encoder;
3563 int pipe = intel_crtc->pipe; 3795 int pipe = intel_crtc->pipe;
3564 int plane = intel_crtc->plane;
3565 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 3796 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3566 3797
3567 if (!intel_crtc->active) 3798 if (!intel_crtc->active)
3568 return; 3799 return;
3569 3800
3570 for_each_encoder_on_crtc(dev, crtc, encoder) 3801 haswell_crtc_disable_planes(crtc);
3571 encoder->disable(encoder);
3572
3573 intel_crtc_wait_for_pending_flips(crtc);
3574 drm_vblank_off(dev, pipe);
3575 3802
3576 /* FBC must be disabled before disabling the plane on HSW. */ 3803 for_each_encoder_on_crtc(dev, crtc, encoder) {
3577 if (dev_priv->fbc.plane == plane) 3804 intel_opregion_notify_encoder(encoder, false);
3578 intel_disable_fbc(dev); 3805 encoder->disable(encoder);
3579 3806 }
3580 hsw_disable_ips(intel_crtc);
3581
3582 intel_crtc_update_cursor(crtc, false);
3583 intel_disable_planes(crtc);
3584 intel_disable_plane(dev_priv, plane, pipe);
3585 3807
3586 if (intel_crtc->config.has_pch_encoder) 3808 if (intel_crtc->config.has_pch_encoder)
3587 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false); 3809 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
@@ -3604,7 +3826,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3604 } 3826 }
3605 3827
3606 intel_crtc->active = false; 3828 intel_crtc->active = false;
3607 intel_update_watermarks(dev); 3829 intel_update_watermarks(crtc);
3608 3830
3609 mutex_lock(&dev->struct_mutex); 3831 mutex_lock(&dev->struct_mutex);
3610 intel_update_fbc(dev); 3832 intel_update_fbc(dev);
@@ -3696,6 +3918,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
3696 struct intel_encoder *encoder; 3918 struct intel_encoder *encoder;
3697 int pipe = intel_crtc->pipe; 3919 int pipe = intel_crtc->pipe;
3698 int plane = intel_crtc->plane; 3920 int plane = intel_crtc->plane;
3921 bool is_dsi;
3699 3922
3700 WARN_ON(!crtc->enabled); 3923 WARN_ON(!crtc->enabled);
3701 3924
@@ -3703,13 +3926,15 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
3703 return; 3926 return;
3704 3927
3705 intel_crtc->active = true; 3928 intel_crtc->active = true;
3706 intel_update_watermarks(dev);
3707 3929
3708 for_each_encoder_on_crtc(dev, crtc, encoder) 3930 for_each_encoder_on_crtc(dev, crtc, encoder)
3709 if (encoder->pre_pll_enable) 3931 if (encoder->pre_pll_enable)
3710 encoder->pre_pll_enable(encoder); 3932 encoder->pre_pll_enable(encoder);
3711 3933
3712 vlv_enable_pll(intel_crtc); 3934 is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
3935
3936 if (!is_dsi)
3937 vlv_enable_pll(intel_crtc);
3713 3938
3714 for_each_encoder_on_crtc(dev, crtc, encoder) 3939 for_each_encoder_on_crtc(dev, crtc, encoder)
3715 if (encoder->pre_enable) 3940 if (encoder->pre_enable)
@@ -3719,8 +3944,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
3719 3944
3720 intel_crtc_load_lut(crtc); 3945 intel_crtc_load_lut(crtc);
3721 3946
3722 intel_enable_pipe(dev_priv, pipe, false); 3947 intel_update_watermarks(crtc);
3723 intel_enable_plane(dev_priv, plane, pipe); 3948 intel_enable_pipe(dev_priv, pipe, false, is_dsi);
3949 intel_enable_primary_plane(dev_priv, plane, pipe);
3724 intel_enable_planes(crtc); 3950 intel_enable_planes(crtc);
3725 intel_crtc_update_cursor(crtc, true); 3951 intel_crtc_update_cursor(crtc, true);
3726 3952
@@ -3745,7 +3971,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3745 return; 3971 return;
3746 3972
3747 intel_crtc->active = true; 3973 intel_crtc->active = true;
3748 intel_update_watermarks(dev);
3749 3974
3750 for_each_encoder_on_crtc(dev, crtc, encoder) 3975 for_each_encoder_on_crtc(dev, crtc, encoder)
3751 if (encoder->pre_enable) 3976 if (encoder->pre_enable)
@@ -3757,8 +3982,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3757 3982
3758 intel_crtc_load_lut(crtc); 3983 intel_crtc_load_lut(crtc);
3759 3984
3760 intel_enable_pipe(dev_priv, pipe, false); 3985 intel_update_watermarks(crtc);
3761 intel_enable_plane(dev_priv, plane, pipe); 3986 intel_enable_pipe(dev_priv, pipe, false, false);
3987 intel_enable_primary_plane(dev_priv, plane, pipe);
3762 intel_enable_planes(crtc); 3988 intel_enable_planes(crtc);
3763 /* The fixup needs to happen before cursor is enabled */ 3989 /* The fixup needs to happen before cursor is enabled */
3764 if (IS_G4X(dev)) 3990 if (IS_G4X(dev))
@@ -3814,7 +4040,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3814 intel_crtc_dpms_overlay(intel_crtc, false); 4040 intel_crtc_dpms_overlay(intel_crtc, false);
3815 intel_crtc_update_cursor(crtc, false); 4041 intel_crtc_update_cursor(crtc, false);
3816 intel_disable_planes(crtc); 4042 intel_disable_planes(crtc);
3817 intel_disable_plane(dev_priv, plane, pipe); 4043 intel_disable_primary_plane(dev_priv, plane, pipe);
3818 4044
3819 intel_disable_pipe(dev_priv, pipe); 4045 intel_disable_pipe(dev_priv, pipe);
3820 4046
@@ -3824,11 +4050,15 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3824 if (encoder->post_disable) 4050 if (encoder->post_disable)
3825 encoder->post_disable(encoder); 4051 encoder->post_disable(encoder);
3826 4052
3827 i9xx_disable_pll(dev_priv, pipe); 4053 if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
4054 vlv_disable_pll(dev_priv, pipe);
4055 else if (!IS_VALLEYVIEW(dev))
4056 i9xx_disable_pll(dev_priv, pipe);
3828 4057
3829 intel_crtc->active = false; 4058 intel_crtc->active = false;
4059 intel_update_watermarks(crtc);
4060
3830 intel_update_fbc(dev); 4061 intel_update_fbc(dev);
3831 intel_update_watermarks(dev);
3832} 4062}
3833 4063
3834static void i9xx_crtc_off(struct drm_crtc *crtc) 4064static void i9xx_crtc_off(struct drm_crtc *crtc)
@@ -3902,6 +4132,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
3902 dev_priv->display.off(crtc); 4132 dev_priv->display.off(crtc);
3903 4133
3904 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); 4134 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
4135 assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
3905 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); 4136 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3906 4137
3907 if (crtc->fb) { 4138 if (crtc->fb) {
@@ -4029,7 +4260,7 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
4029 return false; 4260 return false;
4030 } 4261 }
4031 4262
4032 if (IS_HASWELL(dev)) { 4263 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4033 if (pipe_config->fdi_lanes > 2) { 4264 if (pipe_config->fdi_lanes > 2) {
4034 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", 4265 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
4035 pipe_config->fdi_lanes); 4266 pipe_config->fdi_lanes);
@@ -4091,8 +4322,7 @@ retry:
4091 */ 4322 */
4092 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; 4323 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4093 4324
4094 fdi_dotclock = adjusted_mode->clock; 4325 fdi_dotclock = adjusted_mode->crtc_clock;
4095 fdi_dotclock /= pipe_config->pixel_multiplier;
4096 4326
4097 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, 4327 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
4098 pipe_config->pipe_bpp); 4328 pipe_config->pipe_bpp);
@@ -4134,13 +4364,39 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
4134 struct drm_device *dev = crtc->base.dev; 4364 struct drm_device *dev = crtc->base.dev;
4135 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 4365 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
4136 4366
4137 if (HAS_PCH_SPLIT(dev)) { 4367 /* FIXME should check pixel clock limits on all platforms */
4138 /* FDI link clock is fixed at 2.7G */ 4368 if (INTEL_INFO(dev)->gen < 4) {
4139 if (pipe_config->requested_mode.clock * 3 4369 struct drm_i915_private *dev_priv = dev->dev_private;
4140 > IRONLAKE_FDI_FREQ * 4) 4370 int clock_limit =
4371 dev_priv->display.get_display_clock_speed(dev);
4372
4373 /*
4374 * Enable pixel doubling when the dot clock
4375 * is > 90% of the (display) core speed.
4376 *
4377 * GDG double wide on either pipe,
4378 * otherwise pipe A only.
4379 */
4380 if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
4381 adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
4382 clock_limit *= 2;
4383 pipe_config->double_wide = true;
4384 }
4385
4386 if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
4141 return -EINVAL; 4387 return -EINVAL;
4142 } 4388 }
4143 4389
4390 /*
4391 * Pipe horizontal size must be even in:
4392 * - DVO ganged mode
4393 * - LVDS dual channel mode
4394 * - Double wide pipe
4395 */
4396 if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4397 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
4398 pipe_config->pipe_src_w &= ~1;
4399
4144 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 4400 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
4145 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 4401 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4146 */ 4402 */
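
The double-wide check added above is plain integer arithmetic; the standalone sketch below models it outside the driver. PIPE_A, needs_double_wide() and the sample clock numbers are local stand-ins for illustration, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the pipe enum and platform check. */
enum { PIPE_A = 0, PIPE_B = 1 };

/* True when pixel doubling must be enabled: the dot clock exceeds
 * 90% of the core display clock, and the platform allows double
 * wide on this pipe (any pipe on 915G, otherwise pipe A only). */
static bool needs_double_wide(int pipe, bool is_i915g,
                              int crtc_clock, int clock_limit)
{
        return (pipe == PIPE_A || is_i915g) &&
               crtc_clock > clock_limit * 9 / 10;
}

int main(void)
{
        int clock_limit = 200000;       /* kHz, hypothetical core clock */
        int crtc_clock = 190000;        /* kHz, 95% of the core clock */
        int pipe_src_w = 1367;          /* odd width, must become even */

        if (needs_double_wide(PIPE_A, false, crtc_clock, clock_limit)) {
                clock_limit *= 2;       /* doubled pipe pumps twice the rate */
                pipe_src_w &= ~1;       /* double wide requires even width */
        }

        printf("limit=%d src_w=%d\n", clock_limit, pipe_src_w);
        return 0;
}

With a 200 MHz core clock, a 190 MHz mode trips the 90% threshold, so the limit doubles and the odd source width is rounded down to 1366, matching the pipe_src_w masking in the hunk above.
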
@@ -4304,28 +4560,6 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4304 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 4560 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4305} 4561}
4306 4562
4307static int vlv_get_refclk(struct drm_crtc *crtc)
4308{
4309 struct drm_device *dev = crtc->dev;
4310 struct drm_i915_private *dev_priv = dev->dev_private;
4311 int refclk = 27000; /* for DP & HDMI */
4312
4313 return 100000; /* only one validated so far */
4314
4315 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
4316 refclk = 96000;
4317 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4318 if (intel_panel_use_ssc(dev_priv))
4319 refclk = 100000;
4320 else
4321 refclk = 96000;
4322 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4323 refclk = 100000;
4324 }
4325
4326 return refclk;
4327}
4328
4329static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) 4563static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4330{ 4564{
4331 struct drm_device *dev = crtc->dev; 4565 struct drm_device *dev = crtc->dev;
@@ -4333,7 +4567,7 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4333 int refclk; 4567 int refclk;
4334 4568
4335 if (IS_VALLEYVIEW(dev)) { 4569 if (IS_VALLEYVIEW(dev)) {
4336 refclk = vlv_get_refclk(crtc); 4570 refclk = 100000;
4337 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 4571 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4338 intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 4572 intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4339 refclk = dev_priv->vbt.lvds_ssc_freq * 1000; 4573 refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
@@ -4391,7 +4625,8 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4391 } 4625 }
4392} 4626}
4393 4627
4394static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv) 4628static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
4629 pipe)
4395{ 4630{
4396 u32 reg_val; 4631 u32 reg_val;
4397 4632
@@ -4399,24 +4634,24 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
4399 * PLLB opamp always calibrates to max value of 0x3f, force enable it 4634 * PLLB opamp always calibrates to max value of 0x3f, force enable it
4400 * and set it to a reasonable value instead. 4635 * and set it to a reasonable value instead.
4401 */ 4636 */
4402 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1)); 4637 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
4403 reg_val &= 0xffffff00; 4638 reg_val &= 0xffffff00;
4404 reg_val |= 0x00000030; 4639 reg_val |= 0x00000030;
4405 vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val); 4640 vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
4406 4641
4407 reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION); 4642 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
4408 reg_val &= 0x8cffffff; 4643 reg_val &= 0x8cffffff;
4409 reg_val = 0x8c000000; 4644 reg_val = 0x8c000000;
4410 vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val); 4645 vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
4411 4646
4412 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1)); 4647 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
4413 reg_val &= 0xffffff00; 4648 reg_val &= 0xffffff00;
4414 vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val); 4649 vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
4415 4650
4416 reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION); 4651 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
4417 reg_val &= 0x00ffffff; 4652 reg_val &= 0x00ffffff;
4418 reg_val |= 0xb0000000; 4653 reg_val |= 0xb0000000;
4419 vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val); 4654 vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
4420} 4655}
4421 4656
4422static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, 4657static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
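
Every DPIO access in the recalibration above is a read-modify-write through the sideband, now addressed per pipe so the right PHY channel is hit. A toy model of that pattern, with a plain array standing in for vlv_dpio_read()/vlv_dpio_write() and an invented register offset:

#include <stdint.h>
#include <stdio.h>

/* Toy register file standing in for the DPIO sideband; the real
 * accessors take dev_priv and go through a mailbox. */
static uint32_t dpio_regs[2][256];

static uint32_t dpio_read(int pipe, int reg)
{
        return dpio_regs[pipe][reg];
}

static void dpio_write(int pipe, int reg, uint32_t val)
{
        dpio_regs[pipe][reg] = val;
}

int main(void)
{
        const int DPIO_IREF1 = 0x40;    /* illustrative offset only */
        int pipe = 1;                   /* PLL B lives behind pipe B */
        uint32_t reg_val;

        /* Same shape as the diff: clear the low byte, set bits 4-5. */
        reg_val = dpio_read(pipe, DPIO_IREF1);
        reg_val &= 0xffffff00;
        reg_val |= 0x00000030;
        dpio_write(pipe, DPIO_IREF1, reg_val);

        printf("IREF(1) = 0x%08x\n", dpio_read(pipe, DPIO_IREF1));
        return 0;
}
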
@@ -4482,18 +4717,18 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4482 4717
4483 /* PLL B needs special handling */ 4718 /* PLL B needs special handling */
4484 if (pipe) 4719 if (pipe)
4485 vlv_pllb_recal_opamp(dev_priv); 4720 vlv_pllb_recal_opamp(dev_priv, pipe);
4486 4721
4487 /* Set up Tx target for periodic Rcomp update */ 4722 /* Set up Tx target for periodic Rcomp update */
4488 vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f); 4723 vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
4489 4724
4490 /* Disable target IRef on PLL */ 4725 /* Disable target IRef on PLL */
4491 reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe)); 4726 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
4492 reg_val &= 0x00ffffff; 4727 reg_val &= 0x00ffffff;
4493 vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val); 4728 vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
4494 4729
4495 /* Disable fast lock */ 4730 /* Disable fast lock */
4496 vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610); 4731 vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
4497 4732
4498 /* Set idtafcrecal before PLL is enabled */ 4733 /* Set idtafcrecal before PLL is enabled */
4499 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 4734 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -4507,55 +4742,55 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4507 * Note: don't use the DAC post divider as it seems unstable. 4742 * Note: don't use the DAC post divider as it seems unstable.
4508 */ 4743 */
4509 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 4744 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
4510 vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); 4745 vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
4511 4746
4512 mdiv |= DPIO_ENABLE_CALIBRATION; 4747 mdiv |= DPIO_ENABLE_CALIBRATION;
4513 vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); 4748 vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
4514 4749
4515 /* Set HBR and RBR LPF coefficients */ 4750 /* Set HBR and RBR LPF coefficients */
4516 if (crtc->config.port_clock == 162000 || 4751 if (crtc->config.port_clock == 162000 ||
4517 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || 4752 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
4518 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) 4753 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
4519 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 4754 vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
4520 0x009f0003); 4755 0x009f0003);
4521 else 4756 else
4522 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 4757 vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
4523 0x00d0000f); 4758 0x00d0000f);
4524 4759
4525 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || 4760 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
4526 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) { 4761 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
4527 /* Use SSC source */ 4762 /* Use SSC source */
4528 if (!pipe) 4763 if (!pipe)
4529 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4764 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
4530 0x0df40000); 4765 0x0df40000);
4531 else 4766 else
4532 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4767 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
4533 0x0df70000); 4768 0x0df70000);
4534 } else { /* HDMI or VGA */ 4769 } else { /* HDMI or VGA */
4535 /* Use bend source */ 4770 /* Use bend source */
4536 if (!pipe) 4771 if (!pipe)
4537 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4772 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
4538 0x0df70000); 4773 0x0df70000);
4539 else 4774 else
4540 vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), 4775 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
4541 0x0df40000); 4776 0x0df40000);
4542 } 4777 }
4543 4778
4544 coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe)); 4779 coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
4545 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 4780 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
4546 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) || 4781 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
4547 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) 4782 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
4548 coreclk |= 0x01000000; 4783 coreclk |= 0x01000000;
4549 vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk); 4784 vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
4550 4785
4551 vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000); 4786 vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
4552 4787
4553 /* Enable DPIO clock input */ 4788 /* Enable DPIO clock input */
4554 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | 4789 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
4555 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; 4790 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
4556 if (pipe) 4791 /* We should never disable this, set it here for state tracking */
4792 if (pipe == PIPE_B)
4557 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 4793 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
4558
4559 dpll |= DPLL_VCO_ENABLE; 4794 dpll |= DPLL_VCO_ENABLE;
4560 crtc->config.dpll_hw_state.dpll = dpll; 4795 crtc->config.dpll_hw_state.dpll = dpll;
4561 4796
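
The divider word built above packs several fields into one register before DPIO_ENABLE_CALIBRATION is OR'ed in on the second write. A sketch of the packing; the shift and mask values follow the decode seen later in vlv_crtc_clock_get() and should be read as illustrative assumptions, not verified register definitions:

#include <stdint.h>
#include <stdio.h>

/* Shift/mask layout assumed from this era's DPIO usage; illustrative. */
#define M1DIV_SHIFT             8
#define M2DIV_MASK              0xff
#define P1_SHIFT                21
#define P2_SHIFT                16
#define N_SHIFT                 12
#define POST_DIV_SHIFT          28
#define ENABLE_CALIBRATION      (1 << 11)

int main(void)
{
        uint32_t m1 = 2, m2 = 0x9a, n = 1, p1 = 3, p2 = 2, post_div = 0;
        uint32_t mdiv;

        /* Pack the dividers the way vlv_update_pll composes mdiv. */
        mdiv = (m1 << M1DIV_SHIFT) | (m2 & M2DIV_MASK);
        mdiv |= (p1 << P1_SHIFT) | (p2 << P2_SHIFT) | (n << N_SHIFT);
        mdiv |= post_div << POST_DIV_SHIFT;
        mdiv |= ENABLE_CALIBRATION;     /* set only on the second write */

        printf("mdiv = 0x%08x\n", mdiv);
        return 0;
}
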
@@ -4693,7 +4928,6 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
4693 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 4928 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
4694 struct drm_display_mode *adjusted_mode = 4929 struct drm_display_mode *adjusted_mode =
4695 &intel_crtc->config.adjusted_mode; 4930 &intel_crtc->config.adjusted_mode;
4696 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
4697 uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end; 4931 uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
4698 4932
4699 /* We need to be careful not to change the adjusted mode, for otherwise 4933

@@ -4746,7 +4980,8 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
4746 * always be the user's requested size. 4980 * always be the user's requested size.
4747 */ 4981 */
4748 I915_WRITE(PIPESRC(pipe), 4982 I915_WRITE(PIPESRC(pipe),
4749 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 4983 ((intel_crtc->config.pipe_src_w - 1) << 16) |
4984 (intel_crtc->config.pipe_src_h - 1));
4750} 4985}
4751 4986
4752static void intel_get_pipe_timings(struct intel_crtc *crtc, 4987static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -4784,8 +5019,11 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
4784 } 5019 }
4785 5020
4786 tmp = I915_READ(PIPESRC(crtc->pipe)); 5021 tmp = I915_READ(PIPESRC(crtc->pipe));
4787 pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1; 5022 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
4788 pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1; 5023 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
5024
5025 pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
5026 pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
4789} 5027}
4790 5028
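
PIPESRC stores the source size minus one, width in the high word and height in the low word, and the readback in intel_get_pipe_timings() reverses exactly that. A round-trip sketch using the same encoding:

#include <stdint.h>
#include <stdio.h>

/* Encode as the PIPESRC write above: (w-1) in bits 31:16, (h-1) in 15:0. */
static uint32_t pipesrc_pack(int w, int h)
{
        return ((uint32_t)(w - 1) << 16) | (uint32_t)(h - 1);
}

int main(void)
{
        uint32_t tmp = pipesrc_pack(1920, 1080);

        /* Decode exactly as intel_get_pipe_timings does. */
        int pipe_src_h = (tmp & 0xffff) + 1;
        int pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

        printf("%dx%d (reg=0x%08x)\n", pipe_src_w, pipe_src_h, tmp);
        return 0;
}
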
4791static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc, 5029static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
@@ -4805,7 +5043,7 @@ static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
4805 5043
4806 crtc->mode.flags = pipe_config->adjusted_mode.flags; 5044 crtc->mode.flags = pipe_config->adjusted_mode.flags;
4807 5045
4808 crtc->mode.clock = pipe_config->adjusted_mode.clock; 5046 crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
4809 crtc->mode.flags |= pipe_config->adjusted_mode.flags; 5047 crtc->mode.flags |= pipe_config->adjusted_mode.flags;
4810} 5048}
4811 5049
@@ -4821,17 +5059,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
4821 I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE) 5059 I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
4822 pipeconf |= PIPECONF_ENABLE; 5060 pipeconf |= PIPECONF_ENABLE;
4823 5061
4824 if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) { 5062 if (intel_crtc->config.double_wide)
4825 /* Enable pixel doubling when the dot clock is > 90% of the (display) 5063 pipeconf |= PIPECONF_DOUBLE_WIDE;
4826 * core speed.
4827 *
4828 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4829 * pipe == 0 check?
4830 */
4831 if (intel_crtc->config.requested_mode.clock >
4832 dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
4833 pipeconf |= PIPECONF_DOUBLE_WIDE;
4834 }
4835 5064
4836 /* only g4x and later have fancy bpc/dither controls */ 5065 /* only g4x and later have fancy bpc/dither controls */
4837 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { 5066 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
@@ -4885,14 +5114,13 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4885 struct drm_device *dev = crtc->dev; 5114 struct drm_device *dev = crtc->dev;
4886 struct drm_i915_private *dev_priv = dev->dev_private; 5115 struct drm_i915_private *dev_priv = dev->dev_private;
4887 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5116 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4888 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
4889 int pipe = intel_crtc->pipe; 5117 int pipe = intel_crtc->pipe;
4890 int plane = intel_crtc->plane; 5118 int plane = intel_crtc->plane;
4891 int refclk, num_connectors = 0; 5119 int refclk, num_connectors = 0;
4892 intel_clock_t clock, reduced_clock; 5120 intel_clock_t clock, reduced_clock;
4893 u32 dspcntr; 5121 u32 dspcntr;
4894 bool ok, has_reduced_clock = false; 5122 bool ok, has_reduced_clock = false;
4895 bool is_lvds = false; 5123 bool is_lvds = false, is_dsi = false;
4896 struct intel_encoder *encoder; 5124 struct intel_encoder *encoder;
4897 const intel_limit_t *limit; 5125 const intel_limit_t *limit;
4898 int ret; 5126 int ret;
@@ -4902,42 +5130,49 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4902 case INTEL_OUTPUT_LVDS: 5130 case INTEL_OUTPUT_LVDS:
4903 is_lvds = true; 5131 is_lvds = true;
4904 break; 5132 break;
5133 case INTEL_OUTPUT_DSI:
5134 is_dsi = true;
5135 break;
4905 } 5136 }
4906 5137
4907 num_connectors++; 5138 num_connectors++;
4908 } 5139 }
4909 5140
4910 refclk = i9xx_get_refclk(crtc, num_connectors); 5141 if (is_dsi)
5142 goto skip_dpll;
4911 5143
4912 /* 5144 if (!intel_crtc->config.clock_set) {
4913 * Returns a set of divisors for the desired target clock with the given 5145 refclk = i9xx_get_refclk(crtc, num_connectors);
4914 * refclk, or FALSE. The returned values represent the clock equation:
4915 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4916 */
4917 limit = intel_limit(crtc, refclk);
4918 ok = dev_priv->display.find_dpll(limit, crtc,
4919 intel_crtc->config.port_clock,
4920 refclk, NULL, &clock);
4921 if (!ok && !intel_crtc->config.clock_set) {
4922 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4923 return -EINVAL;
4924 }
4925 5146
4926 if (is_lvds && dev_priv->lvds_downclock_avail) {
4927 /* 5147 /*
4928 * Ensure we match the reduced clock's P to the target clock. 5148 * Returns a set of divisors for the desired target clock with
4929 * If the clocks don't match, we can't switch the display clock 5149 * the given refclk, or FALSE. The returned values represent
4930 * by using the FP0/FP1. In such case we will disable the LVDS 5150 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
4931 * downclock feature. 5151 * 2) / p1 / p2.
4932 */ 5152 */
4933 has_reduced_clock = 5153 limit = intel_limit(crtc, refclk);
4934 dev_priv->display.find_dpll(limit, crtc, 5154 ok = dev_priv->display.find_dpll(limit, crtc,
4935 dev_priv->lvds_downclock, 5155 intel_crtc->config.port_clock,
4936 refclk, &clock, 5156 refclk, NULL, &clock);
4937 &reduced_clock); 5157 if (!ok) {
4938 } 5158 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4939 /* Compat-code for transition, will disappear. */ 5159 return -EINVAL;
4940 if (!intel_crtc->config.clock_set) { 5160 }
5161
5162 if (is_lvds && dev_priv->lvds_downclock_avail) {
5163 /*
5164 * Ensure we match the reduced clock's P to the target
5165 * clock. If the clocks don't match, we can't switch
5166 * the display clock by using the FP0/FP1. In such case
5167 * we will disable the LVDS downclock feature.
5168 */
5169 has_reduced_clock =
5170 dev_priv->display.find_dpll(limit, crtc,
5171 dev_priv->lvds_downclock,
5172 refclk, &clock,
5173 &reduced_clock);
5174 }
5175 /* Compat-code for transition, will disappear. */
4941 intel_crtc->config.dpll.n = clock.n; 5176 intel_crtc->config.dpll.n = clock.n;
4942 intel_crtc->config.dpll.m1 = clock.m1; 5177 intel_crtc->config.dpll.m1 = clock.m1;
4943 intel_crtc->config.dpll.m2 = clock.m2; 5178 intel_crtc->config.dpll.m2 = clock.m2;
@@ -4945,17 +5180,19 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4945 intel_crtc->config.dpll.p2 = clock.p2; 5180 intel_crtc->config.dpll.p2 = clock.p2;
4946 } 5181 }
4947 5182
4948 if (IS_GEN2(dev)) 5183 if (IS_GEN2(dev)) {
4949 i8xx_update_pll(intel_crtc, 5184 i8xx_update_pll(intel_crtc,
4950 has_reduced_clock ? &reduced_clock : NULL, 5185 has_reduced_clock ? &reduced_clock : NULL,
4951 num_connectors); 5186 num_connectors);
4952 else if (IS_VALLEYVIEW(dev)) 5187 } else if (IS_VALLEYVIEW(dev)) {
4953 vlv_update_pll(intel_crtc); 5188 vlv_update_pll(intel_crtc);
4954 else 5189 } else {
4955 i9xx_update_pll(intel_crtc, 5190 i9xx_update_pll(intel_crtc,
4956 has_reduced_clock ? &reduced_clock : NULL, 5191 has_reduced_clock ? &reduced_clock : NULL,
4957 num_connectors); 5192 num_connectors);
5193 }
4958 5194
5195skip_dpll:
4959 /* Set up the display plane register */ 5196 /* Set up the display plane register */
4960 dspcntr = DISPPLANE_GAMMA_ENABLE; 5197 dspcntr = DISPPLANE_GAMMA_ENABLE;
4961 5198
@@ -4972,8 +5209,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4972 * which should always be the user's requested size. 5209 * which should always be the user's requested size.
4973 */ 5210 */
4974 I915_WRITE(DSPSIZE(plane), 5211 I915_WRITE(DSPSIZE(plane),
4975 ((mode->vdisplay - 1) << 16) | 5212 ((intel_crtc->config.pipe_src_h - 1) << 16) |
4976 (mode->hdisplay - 1)); 5213 (intel_crtc->config.pipe_src_w - 1));
4977 I915_WRITE(DSPPOS(plane), 0); 5214 I915_WRITE(DSPPOS(plane), 0);
4978 5215
4979 i9xx_set_pipeconf(intel_crtc); 5216 i9xx_set_pipeconf(intel_crtc);
@@ -4983,8 +5220,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4983 5220
4984 ret = intel_pipe_set_base(crtc, x, y, fb); 5221 ret = intel_pipe_set_base(crtc, x, y, fb);
4985 5222
4986 intel_update_watermarks(dev);
4987
4988 return ret; 5223 return ret;
4989} 5224}
4990 5225
@@ -5015,6 +5250,32 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
5015 I915_READ(LVDS) & LVDS_BORDER_ENABLE; 5250 I915_READ(LVDS) & LVDS_BORDER_ENABLE;
5016} 5251}
5017 5252
5253static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5254 struct intel_crtc_config *pipe_config)
5255{
5256 struct drm_device *dev = crtc->base.dev;
5257 struct drm_i915_private *dev_priv = dev->dev_private;
5258 int pipe = pipe_config->cpu_transcoder;
5259 intel_clock_t clock;
5260 u32 mdiv;
5261 int refclk = 100000;
5262
5263 mutex_lock(&dev_priv->dpio_lock);
5264 mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
5265 mutex_unlock(&dev_priv->dpio_lock);
5266
5267 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
5268 clock.m2 = mdiv & DPIO_M2DIV_MASK;
5269 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
5270 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
5271 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
5272
5273 vlv_clock(refclk, &clock);
5274
5275 /* clock.dot is the fast clock */
5276 pipe_config->port_clock = clock.dot / 5;
5277}
5278
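
Given the mdiv fields decoded above, the dot clock follows the usual VCO/post-divider arithmetic; the sketch below assumes the era's vlv_clock() computes m = m1*m2 and p = p1*p2, and the divider values are hypothetical. As the comment notes, clock.dot is the fast clock, so the port clock is a fifth of it:

#include <stdio.h>

/* Rounded integer division, as DIV_ROUND_CLOSEST does in the kernel. */
static int div_closest(long long a, long long b)
{
        return (int)((a + b / 2) / b);
}

int main(void)
{
        /* Dividers as decoded from mdiv above; values are hypothetical. */
        int refclk = 100000;    /* kHz, the single validated VLV refclk */
        int m1 = 2, m2 = 27, n = 1, p1 = 2, p2 = 5;

        /* Assumed vlv_clock(): m = m1*m2, p = p1*p2. */
        int vco = div_closest((long long)refclk * m1 * m2, n);
        int dot = div_closest(vco, p1 * p2);

        /* clock.dot is the fast clock; the port clock is a fifth of it. */
        printf("fast=%d kHz, port_clock=%d kHz\n", dot, dot / 5);
        return 0;
}
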
5018static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 5279static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5019 struct intel_crtc_config *pipe_config) 5280 struct intel_crtc_config *pipe_config)
5020{ 5281{
@@ -5045,6 +5306,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5045 } 5306 }
5046 } 5307 }
5047 5308
5309 if (INTEL_INFO(dev)->gen < 4)
5310 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
5311
5048 intel_get_pipe_timings(crtc, pipe_config); 5312 intel_get_pipe_timings(crtc, pipe_config);
5049 5313
5050 i9xx_get_pfit_config(crtc, pipe_config); 5314 i9xx_get_pfit_config(crtc, pipe_config);
@@ -5077,6 +5341,11 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5077 DPLL_PORTB_READY_MASK); 5341 DPLL_PORTB_READY_MASK);
5078 } 5342 }
5079 5343
5344 if (IS_VALLEYVIEW(dev))
5345 vlv_crtc_clock_get(crtc, pipe_config);
5346 else
5347 i9xx_crtc_clock_get(crtc, pipe_config);
5348
5080 return true; 5349 return true;
5081} 5350}
5082 5351
@@ -5565,14 +5834,16 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
5565 5834
5566static void haswell_set_pipeconf(struct drm_crtc *crtc) 5835static void haswell_set_pipeconf(struct drm_crtc *crtc)
5567{ 5836{
5568 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 5837 struct drm_device *dev = crtc->dev;
5838 struct drm_i915_private *dev_priv = dev->dev_private;
5569 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5839 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5840 enum pipe pipe = intel_crtc->pipe;
5570 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 5841 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
5571 uint32_t val; 5842 uint32_t val;
5572 5843
5573 val = 0; 5844 val = 0;
5574 5845
5575 if (intel_crtc->config.dither) 5846 if (IS_HASWELL(dev) && intel_crtc->config.dither)
5576 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 5847 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5577 5848
5578 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 5849 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
@@ -5585,6 +5856,33 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
5585 5856
5586 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); 5857 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
5587 POSTING_READ(GAMMA_MODE(intel_crtc->pipe)); 5858 POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
5859
5860 if (IS_BROADWELL(dev)) {
5861 val = 0;
5862
5863 switch (intel_crtc->config.pipe_bpp) {
5864 case 18:
5865 val |= PIPEMISC_DITHER_6_BPC;
5866 break;
5867 case 24:
5868 val |= PIPEMISC_DITHER_8_BPC;
5869 break;
5870 case 30:
5871 val |= PIPEMISC_DITHER_10_BPC;
5872 break;
5873 case 36:
5874 val |= PIPEMISC_DITHER_12_BPC;
5875 break;
5876 default:
5877 /* Case prevented by pipe_config_set_bpp. */
5878 BUG();
5879 }
5880
5881 if (intel_crtc->config.dither)
5882 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
5883
5884 I915_WRITE(PIPEMISC(pipe), val);
5885 }
5588} 5886}
5589 5887
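
The Broadwell branch above maps the pipe's total bpp (summed over three channels) onto a per-channel bpc field in PIPEMISC. A compact restatement with the register encodings reduced to plain bpc numbers:

#include <stdio.h>

/* pipe_bpp counts all three channels, so 24 bpp means 8 bpc; the
 * returned numbers stand in for the PIPEMISC_DITHER_*_BPC fields. */
static int bpp_to_bpc(int pipe_bpp)
{
        switch (pipe_bpp) {
        case 18: return 6;
        case 24: return 8;
        case 30: return 10;
        case 36: return 12;
        default: return -1;     /* rejected earlier by the bpp computation */
        }
}

int main(void)
{
        printf("24 bpp -> %d bpc\n", bpp_to_bpc(24));
        return 0;
}
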
5590static bool ironlake_compute_clocks(struct drm_crtc *crtc, 5888static bool ironlake_compute_clocks(struct drm_crtc *crtc,
@@ -5819,11 +6117,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5819 else 6117 else
5820 intel_crtc->lowfreq_avail = false; 6118 intel_crtc->lowfreq_avail = false;
5821 6119
5822 if (intel_crtc->config.has_pch_encoder) {
5823 pll = intel_crtc_to_shared_dpll(intel_crtc);
5824
5825 }
5826
5827 intel_set_pipe_timings(intel_crtc); 6120 intel_set_pipe_timings(intel_crtc);
5828 6121
5829 if (intel_crtc->config.has_pch_encoder) { 6122 if (intel_crtc->config.has_pch_encoder) {
@@ -5839,25 +6132,67 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5839 6132
5840 ret = intel_pipe_set_base(crtc, x, y, fb); 6133 ret = intel_pipe_set_base(crtc, x, y, fb);
5841 6134
5842 intel_update_watermarks(dev);
5843
5844 return ret; 6135 return ret;
5845} 6136}
5846 6137
5847static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 6138static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
5848 struct intel_crtc_config *pipe_config) 6139 struct intel_link_m_n *m_n)
5849{ 6140{
5850 struct drm_device *dev = crtc->base.dev; 6141 struct drm_device *dev = crtc->base.dev;
5851 struct drm_i915_private *dev_priv = dev->dev_private; 6142 struct drm_i915_private *dev_priv = dev->dev_private;
5852 enum transcoder transcoder = pipe_config->cpu_transcoder; 6143 enum pipe pipe = crtc->pipe;
5853 6144
5854 pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder)); 6145 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
5855 pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder)); 6146 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
5856 pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) 6147 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
5857 & ~TU_SIZE_MASK; 6148 & ~TU_SIZE_MASK;
5858 pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 6149 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
5859 pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 6150 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
5860 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 6151 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6152}
6153
6154static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
6155 enum transcoder transcoder,
6156 struct intel_link_m_n *m_n)
6157{
6158 struct drm_device *dev = crtc->base.dev;
6159 struct drm_i915_private *dev_priv = dev->dev_private;
6160 enum pipe pipe = crtc->pipe;
6161
6162 if (INTEL_INFO(dev)->gen >= 5) {
6163 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
6164 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
6165 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
6166 & ~TU_SIZE_MASK;
6167 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
6168 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
6169 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6170 } else {
6171 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
6172 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
6173 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
6174 & ~TU_SIZE_MASK;
6175 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
6176 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
6177 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6178 }
6179}
6180
6181void intel_dp_get_m_n(struct intel_crtc *crtc,
6182 struct intel_crtc_config *pipe_config)
6183{
6184 if (crtc->config.has_pch_encoder)
6185 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
6186 else
6187 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6188 &pipe_config->dp_m_n);
6189}
6190
6191static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
6192 struct intel_crtc_config *pipe_config)
6193{
6194 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6195 &pipe_config->fdi_m_n);
5861} 6196}
5862 6197
5863static void ironlake_get_pfit_config(struct intel_crtc *crtc, 6198static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -5946,6 +6281,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5946 pipe_config->pixel_multiplier = 6281 pipe_config->pixel_multiplier =
5947 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 6282 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5948 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 6283 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
6284
6285 ironlake_pch_clock_get(crtc, pipe_config);
5949 } else { 6286 } else {
5950 pipe_config->pixel_multiplier = 1; 6287 pipe_config->pixel_multiplier = 1;
5951 } 6288 }
@@ -6002,8 +6339,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
6002 * register. Callers should take care of disabling all the display engine 6339 * register. Callers should take care of disabling all the display engine
6003 * functions, doing the mode unset, fixing interrupts, etc. 6340 * functions, doing the mode unset, fixing interrupts, etc.
6004 */ 6341 */
6005void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 6342static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6006 bool switch_to_fclk, bool allow_power_down) 6343 bool switch_to_fclk, bool allow_power_down)
6007{ 6344{
6008 uint32_t val; 6345 uint32_t val;
6009 6346
@@ -6031,7 +6368,10 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6031 6368
6032 val = I915_READ(D_COMP); 6369 val = I915_READ(D_COMP);
6033 val |= D_COMP_COMP_DISABLE; 6370 val |= D_COMP_COMP_DISABLE;
6034 I915_WRITE(D_COMP, val); 6371 mutex_lock(&dev_priv->rps.hw_lock);
6372 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
6373 DRM_ERROR("Failed to disable D_COMP\n");
6374 mutex_unlock(&dev_priv->rps.hw_lock);
6035 POSTING_READ(D_COMP); 6375 POSTING_READ(D_COMP);
6036 ndelay(100); 6376 ndelay(100);
6037 6377
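
After this change the D_COMP value travels through the pcode mailbox rather than a direct register write, serialized by rps.hw_lock. A rough model of that locked-mailbox pattern using pthreads; the command id and bit value are placeholders:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for sandybridge_pcode_write(): nonzero would mean timeout. */
static int pcode_write(int cmd, uint32_t val)
{
        (void)cmd;
        (void)val;
        return 0;       /* pretend the mailbox always acks */
}

int main(void)
{
        const int GEN6_PCODE_WRITE_D_COMP = 0x11;       /* hypothetical id */
        uint32_t val = 1u << 30;                        /* placeholder bit */

        /* Serialize mailbox access, as rps.hw_lock does in the driver. */
        pthread_mutex_lock(&hw_lock);
        if (pcode_write(GEN6_PCODE_WRITE_D_COMP, val))
                fprintf(stderr, "Failed to disable D_COMP\n");
        pthread_mutex_unlock(&hw_lock);

        return 0;
}
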
@@ -6050,7 +6390,7 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6050 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 6390 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
6051 * source. 6391 * source.
6052 */ 6392 */
6053void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 6393static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6054{ 6394{
6055 uint32_t val; 6395 uint32_t val;
6056 6396
@@ -6073,7 +6413,10 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6073 val = I915_READ(D_COMP); 6413 val = I915_READ(D_COMP);
6074 val |= D_COMP_COMP_FORCE; 6414 val |= D_COMP_COMP_FORCE;
6075 val &= ~D_COMP_COMP_DISABLE; 6415 val &= ~D_COMP_COMP_DISABLE;
6076 I915_WRITE(D_COMP, val); 6416 mutex_lock(&dev_priv->rps.hw_lock);
6417 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
6418 DRM_ERROR("Failed to enable D_COMP\n");
6419 mutex_unlock(&dev_priv->rps.hw_lock);
6077 POSTING_READ(D_COMP); 6420 POSTING_READ(D_COMP);
6078 6421
6079 val = I915_READ(LCPLL_CTL); 6422 val = I915_READ(LCPLL_CTL);
@@ -6256,22 +6599,79 @@ static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
6256 } 6599 }
6257} 6600}
6258 6601
6259static void haswell_modeset_global_resources(struct drm_device *dev) 6602#define for_each_power_domain(domain, mask) \
6603 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
6604 if ((1 << (domain)) & (mask))
6605
6606static unsigned long get_pipe_power_domains(struct drm_device *dev,
6607 enum pipe pipe, bool pfit_enabled)
6260{ 6608{
6261 bool enable = false; 6609 unsigned long mask;
6610 enum transcoder transcoder;
6611
6612 transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
6613
6614 mask = BIT(POWER_DOMAIN_PIPE(pipe));
6615 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
6616 if (pfit_enabled)
6617 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6618
6619 return mask;
6620}
6621
6622void intel_display_set_init_power(struct drm_device *dev, bool enable)
6623{
6624 struct drm_i915_private *dev_priv = dev->dev_private;
6625
6626 if (dev_priv->power_domains.init_power_on == enable)
6627 return;
6628
6629 if (enable)
6630 intel_display_power_get(dev, POWER_DOMAIN_INIT);
6631 else
6632 intel_display_power_put(dev, POWER_DOMAIN_INIT);
6633
6634 dev_priv->power_domains.init_power_on = enable;
6635}
6636
6637static void modeset_update_power_wells(struct drm_device *dev)
6638{
6639 unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
6262 struct intel_crtc *crtc; 6640 struct intel_crtc *crtc;
6263 6641
6642 /*
6643 * First get all needed power domains, then put all unneeded, to avoid
6644 * any unnecessary toggling of the power wells.
6645 */
6264 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 6646 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
6647 enum intel_display_power_domain domain;
6648
6265 if (!crtc->base.enabled) 6649 if (!crtc->base.enabled)
6266 continue; 6650 continue;
6267 6651
6268 if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled || 6652 pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
6269 crtc->config.cpu_transcoder != TRANSCODER_EDP) 6653 crtc->pipe,
6270 enable = true; 6654 crtc->config.pch_pfit.enabled);
6655
6656 for_each_power_domain(domain, pipe_domains[crtc->pipe])
6657 intel_display_power_get(dev, domain);
6271 } 6658 }
6272 6659
6273 intel_set_power_well(dev, enable); 6660 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
6661 enum intel_display_power_domain domain;
6662
6663 for_each_power_domain(domain, crtc->enabled_power_domains)
6664 intel_display_power_put(dev, domain);
6274 6665
6666 crtc->enabled_power_domains = pipe_domains[crtc->pipe];
6667 }
6668
6669 intel_display_set_init_power(dev, false);
6670}
6671
6672static void haswell_modeset_global_resources(struct drm_device *dev)
6673{
6674 modeset_update_power_wells(dev);
6275 hsw_update_package_c8(dev); 6675 hsw_update_package_c8(dev);
6276} 6676}
6277 6677
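
The two passes above are ordered deliberately: references for the new state are taken before the old ones are dropped, so a power well that remains in use never sees its refcount reach zero and never toggles. A reference-counting sketch with invented domain names:

#include <stdio.h>

enum { DOM_PIPE_A, DOM_TRANSCODER_A, DOM_PANEL_FITTER_A, DOM_NUM };

static int refcount[DOM_NUM];

#define for_each_domain(d, mask) \
        for ((d) = 0; (d) < DOM_NUM; (d)++) \
                if ((1u << (d)) & (mask))

int main(void)
{
        unsigned old_mask = (1u << DOM_PIPE_A) | (1u << DOM_PANEL_FITTER_A);
        unsigned new_mask = (1u << DOM_PIPE_A) | (1u << DOM_TRANSCODER_A);
        int d;

        /* References held over from the previous modeset. */
        for_each_domain(d, old_mask)
                refcount[d]++;

        /* Get the new references first... */
        for_each_domain(d, new_mask)
                refcount[d]++;
        /* ...then put the old ones: DOM_PIPE_A never drops to zero. */
        for_each_domain(d, old_mask)
                refcount[d]--;

        for (d = 0; d < DOM_NUM; d++)
                printf("domain %d: refcount %d\n", d, refcount[d]);
        return 0;
}
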
@@ -6310,8 +6710,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
6310 6710
6311 ret = intel_pipe_set_base(crtc, x, y, fb); 6711 ret = intel_pipe_set_base(crtc, x, y, fb);
6312 6712
6313 intel_update_watermarks(dev);
6314
6315 return ret; 6713 return ret;
6316} 6714}
6317 6715
@@ -6419,6 +6817,44 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
6419 return 0; 6817 return 0;
6420} 6818}
6421 6819
6820static struct {
6821 int clock;
6822 u32 config;
6823} hdmi_audio_clock[] = {
6824 { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
6825 { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
6826 { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
6827 { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
6828 { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
6829 { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
6830 { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
6831 { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
6832 { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
6833 { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
6834};
6835
6836/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
6837static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
6838{
6839 int i;
6840
6841 for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
6842 if (mode->clock == hdmi_audio_clock[i].clock)
6843 break;
6844 }
6845
6846 if (i == ARRAY_SIZE(hdmi_audio_clock)) {
6847 DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
6848 i = 1;
6849 }
6850
6851 DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
6852 hdmi_audio_clock[i].clock,
6853 hdmi_audio_clock[i].config);
6854
6855 return hdmi_audio_clock[i].config;
6856}
6857
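
The table keys on exact pixel clocks, covering the 1000/1001 NTSC-rate variants via DIV_ROUND_UP, and anything unlisted falls back to entry 1, the 25.2 MHz bspec default. A trimmed standalone version of the lookup that makes the fallback visible; the config values here are placeholders:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static const struct { int clock; unsigned cfg; } audio_clock[] = {
        { DIV_ROUND_UP(25200 * 1000, 1001), 0x0 },      /* 25.175 MHz */
        { 25200, 0x1 },                                 /* bspec default */
        { 74250, 0x7 },                                 /* cfg illustrative */
};

static unsigned lookup(int clock)
{
        unsigned i;

        for (i = 0; i < sizeof(audio_clock) / sizeof(audio_clock[0]); i++)
                if (clock == audio_clock[i].clock)
                        return audio_clock[i].cfg;
        return audio_clock[1].cfg;      /* fall back to 25.2 MHz default */
}

int main(void)
{
        printf("74250 -> 0x%x\n", lookup(74250));       /* exact match */
        printf("74175 -> 0x%x\n", lookup(74175));       /* falls back */
        return 0;
}
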
6422static bool intel_eld_uptodate(struct drm_connector *connector, 6858static bool intel_eld_uptodate(struct drm_connector *connector,
6423 int reg_eldv, uint32_t bits_eldv, 6859 int reg_eldv, uint32_t bits_eldv,
6424 int reg_elda, uint32_t bits_elda, 6860 int reg_elda, uint32_t bits_elda,
@@ -6449,7 +6885,8 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
6449} 6885}
6450 6886
6451static void g4x_write_eld(struct drm_connector *connector, 6887static void g4x_write_eld(struct drm_connector *connector,
6452 struct drm_crtc *crtc) 6888 struct drm_crtc *crtc,
6889 struct drm_display_mode *mode)
6453{ 6890{
6454 struct drm_i915_private *dev_priv = connector->dev->dev_private; 6891 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6455 uint8_t *eld = connector->eld; 6892 uint8_t *eld = connector->eld;
@@ -6489,7 +6926,8 @@ static void g4x_write_eld(struct drm_connector *connector,
6489} 6926}
6490 6927
6491static void haswell_write_eld(struct drm_connector *connector, 6928static void haswell_write_eld(struct drm_connector *connector,
6492 struct drm_crtc *crtc) 6929 struct drm_crtc *crtc,
6930 struct drm_display_mode *mode)
6493{ 6931{
6494 struct drm_i915_private *dev_priv = connector->dev->dev_private; 6932 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6495 uint8_t *eld = connector->eld; 6933 uint8_t *eld = connector->eld;
@@ -6542,8 +6980,9 @@ static void haswell_write_eld(struct drm_connector *connector,
6542 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); 6980 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6543 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ 6981 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
6544 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ 6982 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6545 } else 6983 } else {
6546 I915_WRITE(aud_config, 0); 6984 I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
6985 }
6547 6986
6548 if (intel_eld_uptodate(connector, 6987 if (intel_eld_uptodate(connector,
6549 aud_cntrl_st2, eldv, 6988 aud_cntrl_st2, eldv,
@@ -6576,7 +7015,8 @@ static void haswell_write_eld(struct drm_connector *connector,
6576} 7015}
6577 7016
6578static void ironlake_write_eld(struct drm_connector *connector, 7017static void ironlake_write_eld(struct drm_connector *connector,
6579 struct drm_crtc *crtc) 7018 struct drm_crtc *crtc,
7019 struct drm_display_mode *mode)
6580{ 7020{
6581 struct drm_i915_private *dev_priv = connector->dev->dev_private; 7021 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6582 uint8_t *eld = connector->eld; 7022 uint8_t *eld = connector->eld;
@@ -6594,6 +7034,11 @@ static void ironlake_write_eld(struct drm_connector *connector,
6594 aud_config = IBX_AUD_CFG(pipe); 7034 aud_config = IBX_AUD_CFG(pipe);
6595 aud_cntl_st = IBX_AUD_CNTL_ST(pipe); 7035 aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
6596 aud_cntrl_st2 = IBX_AUD_CNTL_ST2; 7036 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
7037 } else if (IS_VALLEYVIEW(connector->dev)) {
7038 hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
7039 aud_config = VLV_AUD_CFG(pipe);
7040 aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
7041 aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
6597 } else { 7042 } else {
6598 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe); 7043 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
6599 aud_config = CPT_AUD_CFG(pipe); 7044 aud_config = CPT_AUD_CFG(pipe);
@@ -6603,8 +7048,19 @@ static void ironlake_write_eld(struct drm_connector *connector,
6603 7048
6604 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); 7049 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
6605 7050
6606 i = I915_READ(aud_cntl_st); 7051 if (IS_VALLEYVIEW(connector->dev)) {
6607 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ 7052 struct intel_encoder *intel_encoder;
7053 struct intel_digital_port *intel_dig_port;
7054
7055 intel_encoder = intel_attached_encoder(connector);
7056 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
7057 i = intel_dig_port->port;
7058 } else {
7059 i = I915_READ(aud_cntl_st);
7060 i = (i >> 29) & DIP_PORT_SEL_MASK;
7061 /* DIP_Port_Select, 0x1 = PortB */
7062 }
7063
6608 if (!i) { 7064 if (!i) {
6609 DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); 7065 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
6610 /* operate blindly on all ports */ 7066 /* operate blindly on all ports */
@@ -6620,8 +7076,9 @@ static void ironlake_write_eld(struct drm_connector *connector,
6620 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); 7076 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6621 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ 7077 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
6622 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ 7078 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6623 } else 7079 } else {
6624 I915_WRITE(aud_config, 0); 7080 I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
7081 }
6625 7082
6626 if (intel_eld_uptodate(connector, 7083 if (intel_eld_uptodate(connector,
6627 aud_cntrl_st2, eldv, 7084 aud_cntrl_st2, eldv,
@@ -6671,50 +7128,7 @@ void intel_write_eld(struct drm_encoder *encoder,
6671 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; 7128 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
6672 7129
6673 if (dev_priv->display.write_eld) 7130 if (dev_priv->display.write_eld)
6674 dev_priv->display.write_eld(connector, crtc); 7131 dev_priv->display.write_eld(connector, crtc, mode);
6675}
6676
6677/** Loads the palette/gamma unit for the CRTC with the prepared values */
6678void intel_crtc_load_lut(struct drm_crtc *crtc)
6679{
6680 struct drm_device *dev = crtc->dev;
6681 struct drm_i915_private *dev_priv = dev->dev_private;
6682 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6683 enum pipe pipe = intel_crtc->pipe;
6684 int palreg = PALETTE(pipe);
6685 int i;
6686 bool reenable_ips = false;
6687
6688 /* The clocks have to be on to load the palette. */
6689 if (!crtc->enabled || !intel_crtc->active)
6690 return;
6691
6692 if (!HAS_PCH_SPLIT(dev_priv->dev))
6693 assert_pll_enabled(dev_priv, pipe);
6694
6695 /* use legacy palette for Ironlake */
6696 if (HAS_PCH_SPLIT(dev))
6697 palreg = LGC_PALETTE(pipe);
6698
6699 /* Workaround : Do not read or write the pipe palette/gamma data while
6700 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6701 */
6702 if (intel_crtc->config.ips_enabled &&
6703 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
6704 GAMMA_MODE_MODE_SPLIT)) {
6705 hsw_disable_ips(intel_crtc);
6706 reenable_ips = true;
6707 }
6708
6709 for (i = 0; i < 256; i++) {
6710 I915_WRITE(palreg + 4 * i,
6711 (intel_crtc->lut_r[i] << 16) |
6712 (intel_crtc->lut_g[i] << 8) |
6713 intel_crtc->lut_b[i]);
6714 }
6715
6716 if (reenable_ips)
6717 hsw_enable_ips(intel_crtc);
6718} 7132}
6719 7133
6720static void i845_update_cursor(struct drm_crtc *crtc, u32 base) 7134static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -6790,7 +7204,7 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
6790 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 7204 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6791 cntl |= CURSOR_MODE_DISABLE; 7205 cntl |= CURSOR_MODE_DISABLE;
6792 } 7206 }
6793 if (IS_HASWELL(dev)) { 7207 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6794 cntl |= CURSOR_PIPE_CSC_ENABLE; 7208 cntl |= CURSOR_PIPE_CSC_ENABLE;
6795 cntl &= ~CURSOR_TRICKLE_FEED_DISABLE; 7209 cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
6796 } 7210 }
@@ -6812,23 +7226,20 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6812 int pipe = intel_crtc->pipe; 7226 int pipe = intel_crtc->pipe;
6813 int x = intel_crtc->cursor_x; 7227 int x = intel_crtc->cursor_x;
6814 int y = intel_crtc->cursor_y; 7228 int y = intel_crtc->cursor_y;
6815 u32 base, pos; 7229 u32 base = 0, pos = 0;
6816 bool visible; 7230 bool visible;
6817 7231
6818 pos = 0; 7232 if (on)
6819
6820 if (on && crtc->enabled && crtc->fb) {
6821 base = intel_crtc->cursor_addr; 7233 base = intel_crtc->cursor_addr;
6822 if (x > (int) crtc->fb->width)
6823 base = 0;
6824 7234
6825 if (y > (int) crtc->fb->height) 7235 if (x >= intel_crtc->config.pipe_src_w)
6826 base = 0; 7236 base = 0;
6827 } else 7237
7238 if (y >= intel_crtc->config.pipe_src_h)
6828 base = 0; 7239 base = 0;
6829 7240
6830 if (x < 0) { 7241 if (x < 0) {
6831 if (x + intel_crtc->cursor_width < 0) 7242 if (x + intel_crtc->cursor_width <= 0)
6832 base = 0; 7243 base = 0;
6833 7244
6834 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 7245 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -6837,7 +7248,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6837 pos |= x << CURSOR_X_SHIFT; 7248 pos |= x << CURSOR_X_SHIFT;
6838 7249
6839 if (y < 0) { 7250 if (y < 0) {
6840 if (y + intel_crtc->cursor_height < 0) 7251 if (y + intel_crtc->cursor_height <= 0)
6841 base = 0; 7252 base = 0;
6842 7253
6843 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 7254 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
@@ -6849,7 +7260,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6849 if (!visible && !intel_crtc->cursor_visible) 7260 if (!visible && !intel_crtc->cursor_visible)
6850 return; 7261 return;
6851 7262
6852 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 7263 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6853 I915_WRITE(CURPOS_IVB(pipe), pos); 7264 I915_WRITE(CURPOS_IVB(pipe), pos);
6854 ivb_update_cursor(crtc, base); 7265 ivb_update_cursor(crtc, base);
6855 } else { 7266 } else {
@@ -6980,8 +7391,8 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6980{ 7391{
6981 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7392 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6982 7393
6983 intel_crtc->cursor_x = x; 7394 intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
6984 intel_crtc->cursor_y = y; 7395 intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
6985 7396
6986 if (intel_crtc->active) 7397 if (intel_crtc->active)
6987 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 7398 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
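
Cursor coordinates are now clamped to the signed 16-bit range the position register can encode, and the visibility edge moves to <=: a cursor whose right or bottom edge lands exactly at 0 is fully off screen. A self-contained version of the test, leaving out the sign-bit encoding:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static int clamp_int(int v, int lo, int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

/* Visible iff some part of the cursor overlaps the pipe source area. */
static bool cursor_visible(int x, int y, int w, int h,
                           int src_w, int src_h)
{
        if (x >= src_w || y >= src_h)
                return false;   /* starts past the right/bottom edge */
        if (x + w <= 0 || y + h <= 0)
                return false;   /* ends at or before the left/top edge */
        return true;
}

int main(void)
{
        int x = clamp_int(100000, SHRT_MIN, SHRT_MAX);  /* -> 32767 */

        /* -64 + 64 == 0, so this 64x64 cursor is just off screen. */
        printf("x=%d visible=%d\n", x,
               cursor_visible(-64, 10, 64, 64, 1920, 1080));
        return 0;
}
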
@@ -6989,27 +7400,6 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6989 return 0; 7400 return 0;
6990} 7401}
6991 7402
6992/** Sets the color ramps on behalf of RandR */
6993void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6994 u16 blue, int regno)
6995{
6996 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6997
6998 intel_crtc->lut_r[regno] = red >> 8;
6999 intel_crtc->lut_g[regno] = green >> 8;
7000 intel_crtc->lut_b[regno] = blue >> 8;
7001}
7002
7003void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
7004 u16 *blue, int regno)
7005{
7006 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7007
7008 *red = intel_crtc->lut_r[regno] << 8;
7009 *green = intel_crtc->lut_g[regno] << 8;
7010 *blue = intel_crtc->lut_b[regno] << 8;
7011}
7012
7013static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 7403static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
7014 u16 *blue, uint32_t start, uint32_t size) 7404 u16 *blue, uint32_t start, uint32_t size)
7015{ 7405{
@@ -7045,14 +7435,21 @@ intel_framebuffer_create(struct drm_device *dev,
7045 return ERR_PTR(-ENOMEM); 7435 return ERR_PTR(-ENOMEM);
7046 } 7436 }
7047 7437
7438 ret = i915_mutex_lock_interruptible(dev);
7439 if (ret)
7440 goto err;
7441
7048 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 7442 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
7049 if (ret) { 7443 mutex_unlock(&dev->struct_mutex);
7050 drm_gem_object_unreference_unlocked(&obj->base); 7444 if (ret)
7051 kfree(intel_fb); 7445 goto err;
7052 return ERR_PTR(ret);
7053 }
7054 7446
7055 return &intel_fb->base; 7447 return &intel_fb->base;
7448err:
7449 drm_gem_object_unreference_unlocked(&obj->base);
7450 kfree(intel_fb);
7451
7452 return ERR_PTR(ret);
7056} 7453}
7057 7454
7058static u32 7455static u32
@@ -7095,6 +7492,7 @@ static struct drm_framebuffer *
7095mode_fits_in_fbdev(struct drm_device *dev, 7492mode_fits_in_fbdev(struct drm_device *dev,
7096 struct drm_display_mode *mode) 7493 struct drm_display_mode *mode)
7097{ 7494{
7495#ifdef CONFIG_DRM_I915_FBDEV
7098 struct drm_i915_private *dev_priv = dev->dev_private; 7496 struct drm_i915_private *dev_priv = dev->dev_private;
7099 struct drm_i915_gem_object *obj; 7497 struct drm_i915_gem_object *obj;
7100 struct drm_framebuffer *fb; 7498 struct drm_framebuffer *fb;
@@ -7115,6 +7513,9 @@ mode_fits_in_fbdev(struct drm_device *dev,
7115 return NULL; 7513 return NULL;
7116 7514
7117 return fb; 7515 return fb;
7516#else
7517 return NULL;
7518#endif
7118} 7519}
7119 7520
7120bool intel_get_load_detect_pipe(struct drm_connector *connector, 7521bool intel_get_load_detect_pipe(struct drm_connector *connector,
@@ -7258,6 +7659,22 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
7258 mutex_unlock(&crtc->mutex); 7659 mutex_unlock(&crtc->mutex);
7259} 7660}
7260 7661
7662static int i9xx_pll_refclk(struct drm_device *dev,
7663 const struct intel_crtc_config *pipe_config)
7664{
7665 struct drm_i915_private *dev_priv = dev->dev_private;
7666 u32 dpll = pipe_config->dpll_hw_state.dpll;
7667
7668 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
7669 return dev_priv->vbt.lvds_ssc_freq * 1000;
7670 else if (HAS_PCH_SPLIT(dev))
7671 return 120000;
7672 else if (!IS_GEN2(dev))
7673 return 96000;
7674 else
7675 return 48000;
7676}
7677
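
A restatement of the priority order above as a tiny standalone function; the boolean parameters stand in for the platform and DPLL checks, and the frequencies are in kHz as in the driver:

#include <stdbool.h>
#include <stdio.h>

/* Same priority order as i9xx_pll_refclk(); kHz throughout. */
static int pll_refclk(bool ssc, bool pch_split, bool gen2, int vbt_ssc_khz)
{
        if (ssc)
                return vbt_ssc_khz;     /* spread-spectrum ref from VBT */
        if (pch_split)
                return 120000;
        if (!gen2)
                return 96000;
        return 48000;
}

int main(void)
{
        printf("%d\n", pll_refclk(false, false, false, 100000)); /* 96000 */
        return 0;
}
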
7261/* Returns the clock of the currently programmed mode of the given pipe. */ 7678/* Returns the clock of the currently programmed mode of the given pipe. */
7262static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 7679static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7263 struct intel_crtc_config *pipe_config) 7680 struct intel_crtc_config *pipe_config)
@@ -7265,14 +7682,15 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7265 struct drm_device *dev = crtc->base.dev; 7682 struct drm_device *dev = crtc->base.dev;
7266 struct drm_i915_private *dev_priv = dev->dev_private; 7683 struct drm_i915_private *dev_priv = dev->dev_private;
7267 int pipe = pipe_config->cpu_transcoder; 7684 int pipe = pipe_config->cpu_transcoder;
7268 u32 dpll = I915_READ(DPLL(pipe)); 7685 u32 dpll = pipe_config->dpll_hw_state.dpll;
7269 u32 fp; 7686 u32 fp;
7270 intel_clock_t clock; 7687 intel_clock_t clock;
7688 int refclk = i9xx_pll_refclk(dev, pipe_config);
7271 7689
7272 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 7690 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
7273 fp = I915_READ(FP0(pipe)); 7691 fp = pipe_config->dpll_hw_state.fp0;
7274 else 7692 else
7275 fp = I915_READ(FP1(pipe)); 7693 fp = pipe_config->dpll_hw_state.fp1;
7276 7694
7277 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 7695 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
7278 if (IS_PINEVIEW(dev)) { 7696 if (IS_PINEVIEW(dev)) {
@@ -7303,14 +7721,13 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7303 default: 7721 default:
7304 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 7722 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
7305 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 7723 "mode\n", (int)(dpll & DPLL_MODE_MASK));
7306 pipe_config->adjusted_mode.clock = 0;
7307 return; 7724 return;
7308 } 7725 }
7309 7726
7310 if (IS_PINEVIEW(dev)) 7727 if (IS_PINEVIEW(dev))
7311 pineview_clock(96000, &clock); 7728 pineview_clock(refclk, &clock);
7312 else 7729 else
7313 i9xx_clock(96000, &clock); 7730 i9xx_clock(refclk, &clock);
7314 } else { 7731 } else {
7315 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); 7732 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
7316 7733
@@ -7318,13 +7735,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7318 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 7735 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
7319 DPLL_FPA01_P1_POST_DIV_SHIFT); 7736 DPLL_FPA01_P1_POST_DIV_SHIFT);
7320 clock.p2 = 14; 7737 clock.p2 = 14;
7321
7322 if ((dpll & PLL_REF_INPUT_MASK) ==
7323 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
7324 /* XXX: might not be 66MHz */
7325 i9xx_clock(66000, &clock);
7326 } else
7327 i9xx_clock(48000, &clock);
7328 } else { 7738 } else {
7329 if (dpll & PLL_P1_DIVIDE_BY_TWO) 7739 if (dpll & PLL_P1_DIVIDE_BY_TWO)
7330 clock.p1 = 2; 7740 clock.p1 = 2;
@@ -7336,59 +7746,55 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7336 clock.p2 = 4; 7746 clock.p2 = 4;
7337 else 7747 else
7338 clock.p2 = 2; 7748 clock.p2 = 2;
7339
7340 i9xx_clock(48000, &clock);
7341 } 7749 }
7750
7751 i9xx_clock(refclk, &clock);
7342 } 7752 }
7343 7753
7344 pipe_config->adjusted_mode.clock = clock.dot; 7754 /*
7755 * This value includes pixel_multiplier. We will use
7756 * port_clock to compute adjusted_mode.crtc_clock in the
7757 * encoder's get_config() function.
7758 */
7759 pipe_config->port_clock = clock.dot;
7345} 7760}
7346 7761
7347static void ironlake_crtc_clock_get(struct intel_crtc *crtc, 7762int intel_dotclock_calculate(int link_freq,
7348 struct intel_crtc_config *pipe_config) 7763 const struct intel_link_m_n *m_n)
7349{ 7764{
7350 struct drm_device *dev = crtc->base.dev;
7351 struct drm_i915_private *dev_priv = dev->dev_private;
7352 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7353 int link_freq, repeat;
7354 u64 clock;
7355 u32 link_m, link_n;
7356
7357 repeat = pipe_config->pixel_multiplier;
7358
7359 /* 7765 /*
7360 * The calculation for the data clock is: 7766 * The calculation for the data clock is:
7361 * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp 7767 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
7362 * But we want to avoid losing precision if possible, so: 7768 * But we want to avoid losing precision if possible, so:
7363 * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp)) 7769 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
7364 * 7770 *
7365 * and the link clock is simpler: 7771 * and the link clock is simpler:
7366 * link_clock = (m * link_clock * repeat) / n 7772 * link_clock = (m * link_clock) / n
7367 */ 7773 */
7368 7774
7369 /* 7775 if (!m_n->link_n)
7370 * We need to get the FDI or DP link clock here to derive 7776 return 0;
7371 * the M/N dividers.
7372 *
7373 * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
7374 * For DP, it's either 1.62GHz or 2.7GHz.
7375 * We do our calculations in 10*MHz since we don't need much precision.
7376 */
7377 if (pipe_config->has_pch_encoder)
7378 link_freq = intel_fdi_link_freq(dev) * 10000;
7379 else
7380 link_freq = pipe_config->port_clock;
7381 7777
7382 link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder)); 7778 return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
7383 link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder)); 7779}
7384 7780
7385 if (!link_m || !link_n) 7781static void ironlake_pch_clock_get(struct intel_crtc *crtc,
7386 return; 7782 struct intel_crtc_config *pipe_config)
7783{
7784 struct drm_device *dev = crtc->base.dev;
7387 7785
7388 clock = ((u64)link_m * (u64)link_freq * (u64)repeat); 7786 /* read out port_clock from the DPLL */
7389 do_div(clock, link_n); 7787 i9xx_crtc_clock_get(crtc, pipe_config);
7390 7788
7391 pipe_config->adjusted_mode.clock = clock; 7789 /*
7790 * This value does not include pixel_multiplier.
7791 * We will check that port_clock and adjusted_mode.crtc_clock
7792 * agree once we know their relationship in the encoder's
7793 * get_config() function.
7794 */
7795 pipe_config->adjusted_mode.crtc_clock =
7796 intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
7797 &pipe_config->fdi_m_n);
7392} 7798}
7393 7799
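
intel_dotclock_calculate() multiplies before dividing, in 64 bits, to keep precision: pixel clock = link_m * link_freq / link_n. A worked instance with the fixed 2.7 GHz FDI link and an invented 22:400 M/N pair, which lands on the familiar 148.5 MHz 1080p dot clock:

#include <stdint.h>
#include <stdio.h>

/* pixel_clock = (m * link_clock) / n, computed in 64 bits so the
 * product cannot overflow before the divide. */
static int dotclock_calculate(int link_freq, uint32_t link_m,
                              uint32_t link_n)
{
        if (!link_n)
                return 0;
        return (int)(((uint64_t)link_m * link_freq) / link_n);
}

int main(void)
{
        int link_freq = 2700000;        /* kHz, the fixed 2.7 GHz FDI link */

        /* 2700000 * 22 / 400 = 148500 kHz, a 1080p dot clock. */
        printf("dotclock = %d kHz\n",
               dotclock_calculate(link_freq, 22, 400));
        return 0;
}
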
7394/** Returns the currently programmed mode of the given pipe. */ 7800/** Returns the currently programmed mode of the given pipe. */
@@ -7404,6 +7810,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
7404 int hsync = I915_READ(HSYNC(cpu_transcoder)); 7810 int hsync = I915_READ(HSYNC(cpu_transcoder));
7405 int vtot = I915_READ(VTOTAL(cpu_transcoder)); 7811 int vtot = I915_READ(VTOTAL(cpu_transcoder));
7406 int vsync = I915_READ(VSYNC(cpu_transcoder)); 7812 int vsync = I915_READ(VSYNC(cpu_transcoder));
7813 enum pipe pipe = intel_crtc->pipe;
7407 7814
7408 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 7815 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
7409 if (!mode) 7816 if (!mode)
@@ -7416,11 +7823,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
7416 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need 7823 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
7417 * to use a real value here instead. 7824 * to use a real value here instead.
7418 */ 7825 */
7419 pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe; 7826 pipe_config.cpu_transcoder = (enum transcoder) pipe;
7420 pipe_config.pixel_multiplier = 1; 7827 pipe_config.pixel_multiplier = 1;
7828 pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
7829 pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
7830 pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
7421 i9xx_crtc_clock_get(intel_crtc, &pipe_config); 7831 i9xx_crtc_clock_get(intel_crtc, &pipe_config);
7422 7832
7423 mode->clock = pipe_config.adjusted_mode.clock; 7833 mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
7424 mode->hdisplay = (htot & 0xffff) + 1; 7834 mode->hdisplay = (htot & 0xffff) + 1;
7425 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 7835 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
7426 mode->hsync_start = (hsync & 0xffff) + 1; 7836 mode->hsync_start = (hsync & 0xffff) + 1;
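The +1 terms in the decode above exist because the timing registers store each field minus one, packed into two 16-bit halves. A small worked decode with a hypothetical register value:

/*
 * HTOTAL for hdisplay = 1920, htotal = 2200 would read back as
 *   ((2200 - 1) << 16) | (1920 - 1) = 0x0897077f
 */
u32 htot = 0x0897077f;				/* hypothetical readout */
int hdisplay = (htot & 0xffff) + 1;		/* 0x077f + 1 = 1920 */
int htotal = ((htot & 0xffff0000) >> 16) + 1;	/* 0x0897 + 1 = 2200 */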
@@ -7526,6 +7936,9 @@ void intel_mark_idle(struct drm_device *dev)
7526 7936
7527 intel_decrease_pllclock(crtc); 7937 intel_decrease_pllclock(crtc);
7528 } 7938 }
7939
7940 if (dev_priv->info->gen >= 6)
7941 gen6_rps_idle(dev->dev_private);
7529} 7942}
7530 7943
7531void intel_mark_fb_busy(struct drm_i915_gem_object *obj, 7944void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -7714,7 +8127,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
7714 intel_ring_emit(ring, 0); /* aux display base address, unused */ 8127 intel_ring_emit(ring, 0); /* aux display base address, unused */
7715 8128
7716 intel_mark_page_flip_active(intel_crtc); 8129 intel_mark_page_flip_active(intel_crtc);
7717 intel_ring_advance(ring); 8130 __intel_ring_advance(ring);
7718 return 0; 8131 return 0;
7719 8132
7720err_unpin: 8133err_unpin:
@@ -7756,7 +8169,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
7756 intel_ring_emit(ring, MI_NOOP); 8169 intel_ring_emit(ring, MI_NOOP);
7757 8170
7758 intel_mark_page_flip_active(intel_crtc); 8171 intel_mark_page_flip_active(intel_crtc);
7759 intel_ring_advance(ring); 8172 __intel_ring_advance(ring);
7760 return 0; 8173 return 0;
7761 8174
7762err_unpin: 8175err_unpin:
@@ -7805,7 +8218,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
7805 intel_ring_emit(ring, pf | pipesrc); 8218 intel_ring_emit(ring, pf | pipesrc);
7806 8219
7807 intel_mark_page_flip_active(intel_crtc); 8220 intel_mark_page_flip_active(intel_crtc);
7808 intel_ring_advance(ring); 8221 __intel_ring_advance(ring);
7809 return 0; 8222 return 0;
7810 8223
7811err_unpin: 8224err_unpin:
@@ -7850,7 +8263,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
7850 intel_ring_emit(ring, pf | pipesrc); 8263 intel_ring_emit(ring, pf | pipesrc);
7851 8264
7852 intel_mark_page_flip_active(intel_crtc); 8265 intel_mark_page_flip_active(intel_crtc);
7853 intel_ring_advance(ring); 8266 __intel_ring_advance(ring);
7854 return 0; 8267 return 0;
7855 8268
7856err_unpin: 8269err_unpin:
@@ -7929,7 +8342,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
7929 intel_ring_emit(ring, (MI_NOOP)); 8342 intel_ring_emit(ring, (MI_NOOP));
7930 8343
7931 intel_mark_page_flip_active(intel_crtc); 8344 intel_mark_page_flip_active(intel_crtc);
7932 intel_ring_advance(ring); 8345 __intel_ring_advance(ring);
7933 return 0; 8346 return 0;
7934 8347
7935err_unpin: 8348err_unpin:
@@ -7974,7 +8387,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7974 fb->pitches[0] != crtc->fb->pitches[0])) 8387 fb->pitches[0] != crtc->fb->pitches[0]))
7975 return -EINVAL; 8388 return -EINVAL;
7976 8389
7977 work = kzalloc(sizeof *work, GFP_KERNEL); 8390 work = kzalloc(sizeof(*work), GFP_KERNEL);
7978 if (work == NULL) 8391 if (work == NULL)
7979 return -ENOMEM; 8392 return -ENOMEM;
7980 8393
@@ -8209,6 +8622,17 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
8209 return bpp; 8622 return bpp;
8210} 8623}
8211 8624
8625static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
8626{
8627 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
8628 "type: 0x%x flags: 0x%x\n",
8629 mode->crtc_clock,
8630 mode->crtc_hdisplay, mode->crtc_hsync_start,
8631 mode->crtc_hsync_end, mode->crtc_htotal,
8632 mode->crtc_vdisplay, mode->crtc_vsync_start,
8633 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
8634}
8635
8212static void intel_dump_pipe_config(struct intel_crtc *crtc, 8636static void intel_dump_pipe_config(struct intel_crtc *crtc,
8213 struct intel_crtc_config *pipe_config, 8637 struct intel_crtc_config *pipe_config,
8214 const char *context) 8638 const char *context)
@@ -8225,10 +8649,19 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
8225 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n, 8649 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
8226 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, 8650 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
8227 pipe_config->fdi_m_n.tu); 8651 pipe_config->fdi_m_n.tu);
8652 DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
8653 pipe_config->has_dp_encoder,
8654 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
8655 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
8656 pipe_config->dp_m_n.tu);
8228 DRM_DEBUG_KMS("requested mode:\n"); 8657 DRM_DEBUG_KMS("requested mode:\n");
8229 drm_mode_debug_printmodeline(&pipe_config->requested_mode); 8658 drm_mode_debug_printmodeline(&pipe_config->requested_mode);
8230 DRM_DEBUG_KMS("adjusted mode:\n"); 8659 DRM_DEBUG_KMS("adjusted mode:\n");
8231 drm_mode_debug_printmodeline(&pipe_config->adjusted_mode); 8660 drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
8661 intel_dump_crtc_timings(&pipe_config->adjusted_mode);
8662 DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
8663 DRM_DEBUG_KMS("pipe src size: %dx%d\n",
8664 pipe_config->pipe_src_w, pipe_config->pipe_src_h);
8232 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 8665 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
8233 pipe_config->gmch_pfit.control, 8666 pipe_config->gmch_pfit.control,
8234 pipe_config->gmch_pfit.pgm_ratios, 8667 pipe_config->gmch_pfit.pgm_ratios,
@@ -8238,6 +8671,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
8238 pipe_config->pch_pfit.size, 8671 pipe_config->pch_pfit.size,
8239 pipe_config->pch_pfit.enabled ? "enabled" : "disabled"); 8672 pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
8240 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled); 8673 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
8674 DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
8241} 8675}
8242 8676
8243static bool check_encoder_cloning(struct drm_crtc *crtc) 8677static bool check_encoder_cloning(struct drm_crtc *crtc)
@@ -8281,6 +8715,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
8281 8715
8282 drm_mode_copy(&pipe_config->adjusted_mode, mode); 8716 drm_mode_copy(&pipe_config->adjusted_mode, mode);
8283 drm_mode_copy(&pipe_config->requested_mode, mode); 8717 drm_mode_copy(&pipe_config->requested_mode, mode);
8718
8284 pipe_config->cpu_transcoder = 8719 pipe_config->cpu_transcoder =
8285 (enum transcoder) to_intel_crtc(crtc)->pipe; 8720 (enum transcoder) to_intel_crtc(crtc)->pipe;
8286 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 8721 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -8307,13 +8742,25 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
8307 if (plane_bpp < 0) 8742 if (plane_bpp < 0)
8308 goto fail; 8743 goto fail;
8309 8744
8745 /*
8746 * Determine the real pipe dimensions. Note that stereo modes can
8747 * increase the actual pipe size due to the frame doubling and
8748 * insertion of additional space for blanks between the frames. This
8749 * is stored in the crtc timings. We use the requested mode to do this
8750 * computation to clearly distinguish it from the adjusted mode, which
8751 * can be changed by the connectors in the below retry loop.
8752 */
8753 drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
8754 pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
8755 pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
8756
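A concrete (hedged) illustration of why the crtc_ timings are the right source for the pipe size:

/*
 * Assuming HDMI 1.4a frame packing for a 1080p stereo mode
 * (vdisplay 1080, vblank 45, vtotal 1125), CRTC_STEREO_DOUBLE gives:
 *
 *   crtc_vdisplay = 2 * 1080 + 45 = 2205  (both eyes plus active space)
 *   crtc_vtotal   = 2 * 1125      = 2250
 *   crtc_clock    = 2 * clock             (e.g. 74.25 -> 148.5 MHz at 24p)
 *
 * so pipe_src_h must be 2205, not the logical 1080.
 */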
8310encoder_retry: 8757encoder_retry:
8311 /* Ensure the port clock defaults are reset when retrying. */ 8758 /* Ensure the port clock defaults are reset when retrying. */
8312 pipe_config->port_clock = 0; 8759 pipe_config->port_clock = 0;
8313 pipe_config->pixel_multiplier = 1; 8760 pipe_config->pixel_multiplier = 1;
8314 8761
8315 /* Fill in default crtc timings, allow encoders to overwrite them. */ 8762 /* Fill in default crtc timings, allow encoders to overwrite them. */
8316 drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0); 8763 drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
8317 8764
8318 /* Pass our mode to the connectors and the CRTC to give them a chance to 8765 /* Pass our mode to the connectors and the CRTC to give them a chance to
8319 * adjust it according to limitations or connector properties, and also 8766 * adjust it according to limitations or connector properties, and also
@@ -8334,7 +8781,8 @@ encoder_retry:
8334 /* Set default port clock if not overwritten by the encoder. Needs to be 8781 /* Set default port clock if not overwritten by the encoder. Needs to be
8335 * done afterwards in case the encoder adjusts the mode. */ 8782 * done afterwards in case the encoder adjusts the mode. */
8336 if (!pipe_config->port_clock) 8783 if (!pipe_config->port_clock)
8337 pipe_config->port_clock = pipe_config->adjusted_mode.clock; 8784 pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
8785 * pipe_config->pixel_multiplier;
8338 8786
8339 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 8787 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
8340 if (ret < 0) { 8788 if (ret < 0) {
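The multiplication by pixel_multiplier matters for encoders that run the port faster than the dotclock; a hedged example based on SDVO's multiplier rule (bus rate kept at roughly 100 MHz or above):

/*
 * Hypothetical SDVO case: a 65 MHz mode sits below the ~100 MHz bus
 * minimum, so the encoder selects pixel_multiplier = 2 and the default
 * becomes
 *
 *   port_clock = 65000 kHz * 2 = 130000 kHz
 *
 * while adjusted_mode.crtc_clock stays at 65000 kHz.
 */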
@@ -8521,13 +8969,9 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
8521 8969
8522} 8970}
8523 8971
8524static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur, 8972static bool intel_fuzzy_clock_check(int clock1, int clock2)
8525 struct intel_crtc_config *new)
8526{ 8973{
8527 int clock1, clock2, diff; 8974 int diff;
8528
8529 clock1 = cur->adjusted_mode.clock;
8530 clock2 = new->adjusted_mode.clock;
8531 8975
8532 if (clock1 == clock2) 8976 if (clock1 == clock2)
8533 return true; 8977 return true;
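Only the refactored prototype and its early-out survive in this hunk; a sketch of the complete helper, assuming the roughly 5% tolerance i915 applied at the time:

static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	diff = abs(clock1 - clock2);

	/* Accept clocks that differ by less than 5% of their sum. */
	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
		return true;

	return false;
}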
@@ -8581,6 +9025,15 @@ intel_pipe_config_compare(struct drm_device *dev,
8581 return false; \ 9025 return false; \
8582 } 9026 }
8583 9027
9028#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
9029 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
9030 DRM_ERROR("mismatch in " #name " " \
9031 "(expected %i, found %i)\n", \
9032 current_config->name, \
9033 pipe_config->name); \
9034 return false; \
9035 }
9036
8584#define PIPE_CONF_QUIRK(quirk) \ 9037#define PIPE_CONF_QUIRK(quirk) \
8585 ((current_config->quirks | pipe_config->quirks) & (quirk)) 9038 ((current_config->quirks | pipe_config->quirks) & (quirk))
8586 9039
@@ -8594,6 +9047,13 @@ intel_pipe_config_compare(struct drm_device *dev,
8594 PIPE_CONF_CHECK_I(fdi_m_n.link_n); 9047 PIPE_CONF_CHECK_I(fdi_m_n.link_n);
8595 PIPE_CONF_CHECK_I(fdi_m_n.tu); 9048 PIPE_CONF_CHECK_I(fdi_m_n.tu);
8596 9049
9050 PIPE_CONF_CHECK_I(has_dp_encoder);
9051 PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
9052 PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
9053 PIPE_CONF_CHECK_I(dp_m_n.link_m);
9054 PIPE_CONF_CHECK_I(dp_m_n.link_n);
9055 PIPE_CONF_CHECK_I(dp_m_n.tu);
9056
8597 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay); 9057 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
8598 PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal); 9058 PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
8599 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start); 9059 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
@@ -8624,8 +9084,8 @@ intel_pipe_config_compare(struct drm_device *dev,
8624 DRM_MODE_FLAG_NVSYNC); 9084 DRM_MODE_FLAG_NVSYNC);
8625 } 9085 }
8626 9086
8627 PIPE_CONF_CHECK_I(requested_mode.hdisplay); 9087 PIPE_CONF_CHECK_I(pipe_src_w);
8628 PIPE_CONF_CHECK_I(requested_mode.vdisplay); 9088 PIPE_CONF_CHECK_I(pipe_src_h);
8629 9089
8630 PIPE_CONF_CHECK_I(gmch_pfit.control); 9090 PIPE_CONF_CHECK_I(gmch_pfit.control);
8631 /* pfit ratios are autocomputed by the hw on gen4+ */ 9091 /* pfit ratios are autocomputed by the hw on gen4+ */
@@ -8640,6 +9100,8 @@ intel_pipe_config_compare(struct drm_device *dev,
8640 9100
8641 PIPE_CONF_CHECK_I(ips_enabled); 9101 PIPE_CONF_CHECK_I(ips_enabled);
8642 9102
9103 PIPE_CONF_CHECK_I(double_wide);
9104
8643 PIPE_CONF_CHECK_I(shared_dpll); 9105 PIPE_CONF_CHECK_I(shared_dpll);
8644 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 9106 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8645 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 9107 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
@@ -8649,20 +9111,17 @@ intel_pipe_config_compare(struct drm_device *dev,
8649 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 9111 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
8650 PIPE_CONF_CHECK_I(pipe_bpp); 9112 PIPE_CONF_CHECK_I(pipe_bpp);
8651 9113
9114 if (!IS_HASWELL(dev)) {
9115 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
9116 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9117 }
9118
8652#undef PIPE_CONF_CHECK_X 9119#undef PIPE_CONF_CHECK_X
8653#undef PIPE_CONF_CHECK_I 9120#undef PIPE_CONF_CHECK_I
8654#undef PIPE_CONF_CHECK_FLAGS 9121#undef PIPE_CONF_CHECK_FLAGS
9122#undef PIPE_CONF_CHECK_CLOCK_FUZZY
8655#undef PIPE_CONF_QUIRK 9123#undef PIPE_CONF_QUIRK
8656 9124
8657 if (!IS_HASWELL(dev)) {
8658 if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
8659 DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
8660 current_config->adjusted_mode.clock,
8661 pipe_config->adjusted_mode.clock);
8662 return false;
8663 }
8664 }
8665
8666 return true; 9125 return true;
8667} 9126}
8668 9127
@@ -8794,9 +9253,6 @@ check_crtc_state(struct drm_device *dev)
8794 encoder->get_config(encoder, &pipe_config); 9253 encoder->get_config(encoder, &pipe_config);
8795 } 9254 }
8796 9255
8797 if (dev_priv->display.get_clock)
8798 dev_priv->display.get_clock(crtc, &pipe_config);
8799
8800 WARN(crtc->active != active, 9256 WARN(crtc->active != active,
8801 "crtc active state doesn't match with hw state " 9257 "crtc active state doesn't match with hw state "
8802 "(expected %i, found %i)\n", crtc->active, active); 9258 "(expected %i, found %i)\n", crtc->active, active);
@@ -8871,6 +9327,18 @@ intel_modeset_check_state(struct drm_device *dev)
8871 check_shared_dpll_state(dev); 9327 check_shared_dpll_state(dev);
8872} 9328}
8873 9329
9330void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
9331 int dotclock)
9332{
9333 /*
9334 * FDI already provided one idea for the dotclock.
9335 * Yell if the encoder disagrees.
9336 */
9337 WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
9338 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
9339 pipe_config->adjusted_mode.crtc_clock, dotclock);
9340}
9341
8874static int __intel_set_mode(struct drm_crtc *crtc, 9342static int __intel_set_mode(struct drm_crtc *crtc,
8875 struct drm_display_mode *mode, 9343 struct drm_display_mode *mode,
8876 int x, int y, struct drm_framebuffer *fb) 9344 int x, int y, struct drm_framebuffer *fb)
@@ -8883,7 +9351,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
8883 unsigned disable_pipes, prepare_pipes, modeset_pipes; 9351 unsigned disable_pipes, prepare_pipes, modeset_pipes;
8884 int ret = 0; 9352 int ret = 0;
8885 9353
8886 saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL); 9354 saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
8887 if (!saved_mode) 9355 if (!saved_mode)
8888 return -ENOMEM; 9356 return -ENOMEM;
8889 saved_hwmode = saved_mode + 1; 9357 saved_hwmode = saved_mode + 1;
@@ -9422,7 +9890,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
9422 struct intel_crtc *intel_crtc; 9890 struct intel_crtc *intel_crtc;
9423 int i; 9891 int i;
9424 9892
9425 intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 9893 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
9426 if (intel_crtc == NULL) 9894 if (intel_crtc == NULL)
9427 return; 9895 return;
9428 9896
@@ -9451,6 +9919,18 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
9451 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 9919 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
9452} 9920}
9453 9921
9922enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
9923{
9924 struct drm_encoder *encoder = connector->base.encoder;
9925
9926 WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
9927
9928 if (!encoder)
9929 return INVALID_PIPE;
9930
9931 return to_intel_crtc(encoder->crtc)->pipe;
9932}
9933
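A minimal usage sketch (hypothetical caller; the helper itself asserts that mode_config.mutex is held):

enum pipe pipe;

mutex_lock(&dev->mode_config.mutex);
pipe = intel_get_pipe_from_connector(connector);
mutex_unlock(&dev->mode_config.mutex);

if (pipe == INVALID_PIPE)
	return;	/* connector is not currently driven by any CRTC */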
9454int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 9934int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
9455 struct drm_file *file) 9935 struct drm_file *file)
9456{ 9936{
@@ -9466,7 +9946,7 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
9466 9946
9467 if (!drmmode_obj) { 9947 if (!drmmode_obj) {
9468 DRM_ERROR("no such CRTC id\n"); 9948 DRM_ERROR("no such CRTC id\n");
9469 return -EINVAL; 9949 return -ENOENT;
9470 } 9950 }
9471 9951
9472 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); 9952 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
@@ -9573,7 +10053,13 @@ static void intel_setup_outputs(struct drm_device *dev)
9573 if (I915_READ(PCH_DP_D) & DP_DETECTED) 10053 if (I915_READ(PCH_DP_D) & DP_DETECTED)
9574 intel_dp_init(dev, PCH_DP_D, PORT_D); 10054 intel_dp_init(dev, PCH_DP_D, PORT_D);
9575 } else if (IS_VALLEYVIEW(dev)) { 10055 } else if (IS_VALLEYVIEW(dev)) {
9576 /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ 10056 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
10057 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
10058 PORT_B);
10059 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
10060 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
10061 }
10062
9577 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) { 10063 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
9578 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, 10064 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
9579 PORT_C); 10065 PORT_C);
@@ -9582,12 +10068,7 @@ static void intel_setup_outputs(struct drm_device *dev)
9582 PORT_C); 10068 PORT_C);
9583 } 10069 }
9584 10070
9585 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) { 10071 intel_dsi_init(dev);
9586 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
9587 PORT_B);
9588 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
9589 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
9590 }
9591 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 10072 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
9592 bool found = false; 10073 bool found = false;
9593 10074
@@ -9643,6 +10124,7 @@ static void intel_setup_outputs(struct drm_device *dev)
9643void intel_framebuffer_fini(struct intel_framebuffer *fb) 10124void intel_framebuffer_fini(struct intel_framebuffer *fb)
9644{ 10125{
9645 drm_framebuffer_cleanup(&fb->base); 10126 drm_framebuffer_cleanup(&fb->base);
10127 WARN_ON(!fb->obj->framebuffer_references--);
9646 drm_gem_object_unreference_unlocked(&fb->obj->base); 10128 drm_gem_object_unreference_unlocked(&fb->obj->base);
9647} 10129}
9648 10130
@@ -9674,9 +10156,12 @@ int intel_framebuffer_init(struct drm_device *dev,
9674 struct drm_mode_fb_cmd2 *mode_cmd, 10156 struct drm_mode_fb_cmd2 *mode_cmd,
9675 struct drm_i915_gem_object *obj) 10157 struct drm_i915_gem_object *obj)
9676{ 10158{
10159 int aligned_height, tile_height;
9677 int pitch_limit; 10160 int pitch_limit;
9678 int ret; 10161 int ret;
9679 10162
10163 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
10164
9680 if (obj->tiling_mode == I915_TILING_Y) { 10165 if (obj->tiling_mode == I915_TILING_Y) {
9681 DRM_DEBUG("hardware does not support tiling Y\n"); 10166 DRM_DEBUG("hardware does not support tiling Y\n");
9682 return -EINVAL; 10167 return -EINVAL;
@@ -9765,8 +10250,16 @@ int intel_framebuffer_init(struct drm_device *dev,
9765 if (mode_cmd->offsets[0] != 0) 10250 if (mode_cmd->offsets[0] != 0)
9766 return -EINVAL; 10251 return -EINVAL;
9767 10252
10253 tile_height = IS_GEN2(dev) ? 16 : 8;
10254 aligned_height = ALIGN(mode_cmd->height,
10255 obj->tiling_mode ? tile_height : 1);
10256 /* FIXME drm helper for size checks (especially planar formats)? */
10257 if (obj->base.size < aligned_height * mode_cmd->pitches[0])
10258 return -EINVAL;
10259
9768 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); 10260 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
9769 intel_fb->obj = obj; 10261 intel_fb->obj = obj;
10262 intel_fb->obj->framebuffer_references++;
9770 10263
9771 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 10264 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
9772 if (ret) { 10265 if (ret) {
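A worked instance of the new size check (values hypothetical):

/*
 * X-tiled 1920x1080 XRGB8888 framebuffer, pitch 7680 bytes, gen4+:
 *
 *   tile_height    = 8                      (16 only on gen2)
 *   aligned_height = ALIGN(1080, 8) = 1080
 *   required size  = 1080 * 7680 = 8294400 bytes (~7.9 MiB)
 *
 * A backing object smaller than that now fails with -EINVAL instead of
 * letting the display engine scan out past the end of the BO.
 */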
@@ -9792,9 +10285,15 @@ intel_user_framebuffer_create(struct drm_device *dev,
9792 return intel_framebuffer_create(dev, mode_cmd, obj); 10285 return intel_framebuffer_create(dev, mode_cmd, obj);
9793} 10286}
9794 10287
10288#ifndef CONFIG_DRM_I915_FBDEV
10289static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
10290{
10291}
10292#endif
10293
9795static const struct drm_mode_config_funcs intel_mode_funcs = { 10294static const struct drm_mode_config_funcs intel_mode_funcs = {
9796 .fb_create = intel_user_framebuffer_create, 10295 .fb_create = intel_user_framebuffer_create,
9797 .output_poll_changed = intel_fb_output_poll_changed, 10296 .output_poll_changed = intel_fbdev_output_poll_changed,
9798}; 10297};
9799 10298
9800/* Set up chip specific display functions */ 10299/* Set up chip specific display functions */
@@ -9820,7 +10319,6 @@ static void intel_init_display(struct drm_device *dev)
9820 dev_priv->display.update_plane = ironlake_update_plane; 10319 dev_priv->display.update_plane = ironlake_update_plane;
9821 } else if (HAS_PCH_SPLIT(dev)) { 10320 } else if (HAS_PCH_SPLIT(dev)) {
9822 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 10321 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
9823 dev_priv->display.get_clock = ironlake_crtc_clock_get;
9824 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 10322 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
9825 dev_priv->display.crtc_enable = ironlake_crtc_enable; 10323 dev_priv->display.crtc_enable = ironlake_crtc_enable;
9826 dev_priv->display.crtc_disable = ironlake_crtc_disable; 10324 dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9828,7 +10326,6 @@ static void intel_init_display(struct drm_device *dev)
9828 dev_priv->display.update_plane = ironlake_update_plane; 10326 dev_priv->display.update_plane = ironlake_update_plane;
9829 } else if (IS_VALLEYVIEW(dev)) { 10327 } else if (IS_VALLEYVIEW(dev)) {
9830 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 10328 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
9831 dev_priv->display.get_clock = i9xx_crtc_clock_get;
9832 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 10329 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
9833 dev_priv->display.crtc_enable = valleyview_crtc_enable; 10330 dev_priv->display.crtc_enable = valleyview_crtc_enable;
9834 dev_priv->display.crtc_disable = i9xx_crtc_disable; 10331 dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9836,7 +10333,6 @@ static void intel_init_display(struct drm_device *dev)
9836 dev_priv->display.update_plane = i9xx_update_plane; 10333 dev_priv->display.update_plane = i9xx_update_plane;
9837 } else { 10334 } else {
9838 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 10335 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
9839 dev_priv->display.get_clock = i9xx_crtc_clock_get;
9840 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 10336 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
9841 dev_priv->display.crtc_enable = i9xx_crtc_enable; 10337 dev_priv->display.crtc_enable = i9xx_crtc_enable;
9842 dev_priv->display.crtc_disable = i9xx_crtc_disable; 10338 dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9886,7 +10382,7 @@ static void intel_init_display(struct drm_device *dev)
9886 dev_priv->display.write_eld = ironlake_write_eld; 10382 dev_priv->display.write_eld = ironlake_write_eld;
9887 dev_priv->display.modeset_global_resources = 10383 dev_priv->display.modeset_global_resources =
9888 ivb_modeset_global_resources; 10384 ivb_modeset_global_resources;
9889 } else if (IS_HASWELL(dev)) { 10385 } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
9890 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 10386 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
9891 dev_priv->display.write_eld = haswell_write_eld; 10387 dev_priv->display.write_eld = haswell_write_eld;
9892 dev_priv->display.modeset_global_resources = 10388 dev_priv->display.modeset_global_resources =
@@ -9894,7 +10390,8 @@ static void intel_init_display(struct drm_device *dev)
9894 } 10390 }
9895 } else if (IS_G4X(dev)) { 10391 } else if (IS_G4X(dev)) {
9896 dev_priv->display.write_eld = g4x_write_eld; 10392 dev_priv->display.write_eld = g4x_write_eld;
9897 } 10393 } else if (IS_VALLEYVIEW(dev))
10394 dev_priv->display.write_eld = ironlake_write_eld;
9898 10395
9899 /* Default just returns -ENODEV to indicate unsupported */ 10396 /* Default just returns -ENODEV to indicate unsupported */
9900 dev_priv->display.queue_flip = intel_default_queue_flip; 10397 dev_priv->display.queue_flip = intel_default_queue_flip;
@@ -9917,6 +10414,7 @@ static void intel_init_display(struct drm_device *dev)
9917 dev_priv->display.queue_flip = intel_gen6_queue_flip; 10414 dev_priv->display.queue_flip = intel_gen6_queue_flip;
9918 break; 10415 break;
9919 case 7: 10416 case 7:
10417 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
9920 dev_priv->display.queue_flip = intel_gen7_queue_flip; 10418 dev_priv->display.queue_flip = intel_gen7_queue_flip;
9921 break; 10419 break;
9922 } 10420 }
@@ -10012,8 +10510,7 @@ static struct intel_quirk intel_quirks[] = {
10012 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 10510 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
10013 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 10511 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
10014 10512
10015 /* 830/845 need to leave pipe A & dpll A up */ 10513 /* 830 needs to leave pipe A & dpll A up */
10016 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
10017 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 10514 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
10018 10515
10019 /* Lenovo U160 cannot use SSC on LVDS */ 10516 /* Lenovo U160 cannot use SSC on LVDS */
@@ -10022,20 +10519,11 @@ static struct intel_quirk intel_quirks[] = {
10022 /* Sony Vaio Y cannot use SSC on LVDS */ 10519 /* Sony Vaio Y cannot use SSC on LVDS */
10023 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 10520 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
10024 10521
10025 /* Acer Aspire 5734Z must invert backlight brightness */ 10522 /*
10026 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, 10523 * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
10027 10524 * seem to use inverted backlight PWM.
10028 /* Acer/eMachines G725 */ 10525 */
10029 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, 10526 { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
10030
10031 /* Acer/eMachines e725 */
10032 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
10033
10034 /* Acer/Packard Bell NCL20 */
10035 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
10036
10037 /* Acer Aspire 4736Z */
10038 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
10039 10527
10040 /* Dell XPS13 HD Sandy Bridge */ 10528 /* Dell XPS13 HD Sandy Bridge */
10041 { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, 10529 { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
@@ -10084,12 +10572,19 @@ static void i915_disable_vga(struct drm_device *dev)
10084 10572
10085void intel_modeset_init_hw(struct drm_device *dev) 10573void intel_modeset_init_hw(struct drm_device *dev)
10086{ 10574{
10087 intel_init_power_well(dev); 10575 struct drm_i915_private *dev_priv = dev->dev_private;
10088 10576
10089 intel_prepare_ddi(dev); 10577 intel_prepare_ddi(dev);
10090 10578
10091 intel_init_clock_gating(dev); 10579 intel_init_clock_gating(dev);
10092 10580
10581 /* Enable the CRI clock source so we can get at the display */
10582 if (IS_VALLEYVIEW(dev))
10583 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
10584 DPLL_INTEGRATED_CRI_CLK_VLV);
10585
10586 intel_init_dpio(dev);
10587
10093 mutex_lock(&dev->struct_mutex); 10588 mutex_lock(&dev->struct_mutex);
10094 intel_enable_gt_powersave(dev); 10589 intel_enable_gt_powersave(dev);
10095 mutex_unlock(&dev->struct_mutex); 10590 mutex_unlock(&dev->struct_mutex);
@@ -10357,7 +10852,7 @@ void i915_redisable_vga(struct drm_device *dev)
10357 (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0) 10852 (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
10358 return; 10853 return;
10359 10854
10360 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { 10855 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
10361 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 10856 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
10362 i915_disable_vga(dev); 10857 i915_disable_vga(dev);
10363 } 10858 }
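The small-looking change above is a real bug fix: the register carries more than the disable bit, so an equality test can misread the plane state. Side by side:

u32 vga = I915_READ(vga_reg);

/* Old test: triggers whenever any unrelated bit is set, even though
 * the VGA plane is already disabled. */
bool old_check = (vga != VGA_DISP_DISABLE);

/* New test: inspect only the disable bit itself. */
bool new_check = !(vga & VGA_DISP_DISABLE);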
@@ -10380,6 +10875,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
10380 &crtc->config); 10875 &crtc->config);
10381 10876
10382 crtc->base.enabled = crtc->active; 10877 crtc->base.enabled = crtc->active;
10878 crtc->primary_enabled = crtc->active;
10383 10879
10384 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", 10880 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
10385 crtc->base.base.id, 10881 crtc->base.base.id,
@@ -10420,20 +10916,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
10420 } 10916 }
10421 10917
10422 encoder->connectors_active = false; 10918 encoder->connectors_active = false;
10423 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n", 10919 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
10424 encoder->base.base.id, 10920 encoder->base.base.id,
10425 drm_get_encoder_name(&encoder->base), 10921 drm_get_encoder_name(&encoder->base),
10426 encoder->base.crtc ? "enabled" : "disabled", 10922 encoder->base.crtc ? "enabled" : "disabled",
10427 pipe); 10923 pipe_name(pipe));
10428 }
10429
10430 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
10431 base.head) {
10432 if (!crtc->active)
10433 continue;
10434 if (dev_priv->display.get_clock)
10435 dev_priv->display.get_clock(crtc,
10436 &crtc->config);
10437 } 10924 }
10438 10925
10439 list_for_each_entry(connector, &dev->mode_config.connector_list, 10926 list_for_each_entry(connector, &dev->mode_config.connector_list,
@@ -10460,7 +10947,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
10460{ 10947{
10461 struct drm_i915_private *dev_priv = dev->dev_private; 10948 struct drm_i915_private *dev_priv = dev->dev_private;
10462 enum pipe pipe; 10949 enum pipe pipe;
10463 struct drm_plane *plane;
10464 struct intel_crtc *crtc; 10950 struct intel_crtc *crtc;
10465 struct intel_encoder *encoder; 10951 struct intel_encoder *encoder;
10466 int i; 10952 int i;
@@ -10507,7 +10993,12 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
10507 pll->on = false; 10993 pll->on = false;
10508 } 10994 }
10509 10995
10996 if (IS_HASWELL(dev))
10997 ilk_wm_get_hw_state(dev);
10998
10510 if (force_restore) { 10999 if (force_restore) {
11000 i915_redisable_vga(dev);
11001
10511 /* 11002 /*
10512 * We need to use raw interfaces for restoring state to avoid 11003 * We need to use raw interfaces for restoring state to avoid
10513 * checking (bogus) intermediate states. 11004 * checking (bogus) intermediate states.
@@ -10519,10 +11010,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
10519 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, 11010 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
10520 crtc->fb); 11011 crtc->fb);
10521 } 11012 }
10522 list_for_each_entry(plane, &dev->mode_config.plane_list, head)
10523 intel_plane_restore(plane);
10524
10525 i915_redisable_vga(dev);
10526 } else { 11013 } else {
10527 intel_modeset_update_staged_output_state(dev); 11014 intel_modeset_update_staged_output_state(dev);
10528 } 11015 }
@@ -10545,6 +11032,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
10545{ 11032{
10546 struct drm_i915_private *dev_priv = dev->dev_private; 11033 struct drm_i915_private *dev_priv = dev->dev_private;
10547 struct drm_crtc *crtc; 11034 struct drm_crtc *crtc;
11035 struct drm_connector *connector;
10548 11036
10549 /* 11037 /*
10550 * Interrupts and polling as the first thing to avoid creating havoc. 11038 * Interrupts and polling as the first thing to avoid creating havoc.
@@ -10585,6 +11073,10 @@ void intel_modeset_cleanup(struct drm_device *dev)
10585 /* destroy backlight, if any, before the connectors */ 11073 /* destroy backlight, if any, before the connectors */
10586 intel_panel_destroy_backlight(dev); 11074 intel_panel_destroy_backlight(dev);
10587 11075
11076 /* destroy the sysfs files before encoders/connectors */
11077 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
11078 drm_sysfs_connector_remove(connector);
11079
10588 drm_mode_config_cleanup(dev); 11080 drm_mode_config_cleanup(dev);
10589 11081
10590 intel_cleanup_overlay(dev); 11082 intel_cleanup_overlay(dev);
@@ -10680,7 +11172,7 @@ intel_display_capture_error_state(struct drm_device *dev)
10680 if (INTEL_INFO(dev)->num_pipes == 0) 11172 if (INTEL_INFO(dev)->num_pipes == 0)
10681 return NULL; 11173 return NULL;
10682 11174
10683 error = kmalloc(sizeof(*error), GFP_ATOMIC); 11175 error = kzalloc(sizeof(*error), GFP_ATOMIC);
10684 if (error == NULL) 11176 if (error == NULL)
10685 return NULL; 11177 return NULL;
10686 11178
@@ -10688,6 +11180,9 @@ intel_display_capture_error_state(struct drm_device *dev)
10688 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 11180 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
10689 11181
10690 for_each_pipe(i) { 11182 for_each_pipe(i) {
11183 if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
11184 continue;
11185
10691 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { 11186 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
10692 error->cursor[i].control = I915_READ(CURCNTR(i)); 11187 error->cursor[i].control = I915_READ(CURCNTR(i));
10693 error->cursor[i].position = I915_READ(CURPOS(i)); 11188 error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10721,6 +11216,10 @@ intel_display_capture_error_state(struct drm_device *dev)
10721 for (i = 0; i < error->num_transcoders; i++) { 11216 for (i = 0; i < error->num_transcoders; i++) {
10722 enum transcoder cpu_transcoder = transcoders[i]; 11217 enum transcoder cpu_transcoder = transcoders[i];
10723 11218
11219 if (!intel_display_power_enabled(dev,
11220 POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
11221 continue;
11222
10724 error->transcoder[i].cpu_transcoder = cpu_transcoder; 11223 error->transcoder[i].cpu_transcoder = cpu_transcoder;
10725 11224
10726 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 11225 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
@@ -10732,12 +11231,6 @@ intel_display_capture_error_state(struct drm_device *dev)
10732 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 11231 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
10733 } 11232 }
10734 11233
10735 /* In the code above we read the registers without checking if the power
10736 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
10737 * prevent the next I915_WRITE from detecting it and printing an error
10738 * message. */
10739 intel_uncore_clear_errors(dev);
10740
10741 return error; 11234 return error;
10742} 11235}
10743 11236
@@ -10782,7 +11275,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
10782 } 11275 }
10783 11276
10784 for (i = 0; i < error->num_transcoders; i++) { 11277 for (i = 0; i < error->num_transcoders; i++) {
10785 err_printf(m, " CPU transcoder: %c\n", 11278 err_printf(m, "CPU transcoder: %c\n",
10786 transcoder_name(error->transcoder[i].cpu_transcoder)); 11279 transcoder_name(error->transcoder[i].cpu_transcoder));
10787 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 11280 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
10788 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 11281 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1a431377d83b..eb8139da9763 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -38,6 +38,32 @@
38 38
39#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 39#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
40 40
41struct dp_link_dpll {
42 int link_bw;
43 struct dpll dpll;
44};
45
46static const struct dp_link_dpll gen4_dpll[] = {
47 { DP_LINK_BW_1_62,
48 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
49 { DP_LINK_BW_2_7,
50 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
51};
52
53static const struct dp_link_dpll pch_dpll[] = {
54 { DP_LINK_BW_1_62,
55 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
56 { DP_LINK_BW_2_7,
57 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
58};
59
60static const struct dp_link_dpll vlv_dpll[] = {
61 { DP_LINK_BW_1_62,
62 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
63 { DP_LINK_BW_2_7,
64 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
65};
66
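A quick sanity check of two table entries (hedged: this assumes the classic i9xx/PCH DPLL formulas m = 5*(m1+2) + (m2+2), vco = refclk * m / (n+2), dot = vco / (p1*p2), with 96 MHz gen4 and 120 MHz PCH reference clocks):

/*
 * gen4_dpll, DP_LINK_BW_1_62:  m = 5*(23+2) + (8+2) = 135
 *   vco = 96 MHz * 135 / (2+2) = 3240 MHz
 *   dot = 3240 / (2 * 10)      = 162 MHz
 *
 * pch_dpll, DP_LINK_BW_1_62:   m = 5*(12+2) + (9+2) = 81
 *   vco = 120 MHz * 81 / (1+2) = 3240 MHz
 *   dot = 3240 / (2 * 10)      = 162 MHz
 *
 * 162 MHz * 10 bits per symbol = the 1.62 GHz DP link rate, as expected.
 */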
41/** 67/**
42 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 68 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
43 * @intel_dp: DP struct 69 * @intel_dp: DP struct
@@ -211,24 +237,77 @@ intel_hrawclk(struct drm_device *dev)
211 } 237 }
212} 238}
213 239
240static void
241intel_dp_init_panel_power_sequencer(struct drm_device *dev,
242 struct intel_dp *intel_dp,
243 struct edp_power_seq *out);
244static void
245intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
246 struct intel_dp *intel_dp,
247 struct edp_power_seq *out);
248
249static enum pipe
250vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
251{
252 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
253 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
254 struct drm_device *dev = intel_dig_port->base.base.dev;
255 struct drm_i915_private *dev_priv = dev->dev_private;
256 enum port port = intel_dig_port->port;
257 enum pipe pipe;
258
259 /* modeset should have pipe */
260 if (crtc)
261 return to_intel_crtc(crtc)->pipe;
262
263 /* init time, try to find a pipe with this port selected */
264 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
265 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
266 PANEL_PORT_SELECT_MASK;
267 if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
268 return pipe;
269 if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
270 return pipe;
271 }
272
273 /* shrug */
274 return PIPE_A;
275}
276
277static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
278{
279 struct drm_device *dev = intel_dp_to_dev(intel_dp);
280
281 if (HAS_PCH_SPLIT(dev))
282 return PCH_PP_CONTROL;
283 else
284 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
285}
286
287static u32 _pp_stat_reg(struct intel_dp *intel_dp)
288{
289 struct drm_device *dev = intel_dp_to_dev(intel_dp);
290
291 if (HAS_PCH_SPLIT(dev))
292 return PCH_PP_STATUS;
293 else
294 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
295}
296
214static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 297static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
215{ 298{
216 struct drm_device *dev = intel_dp_to_dev(intel_dp); 299 struct drm_device *dev = intel_dp_to_dev(intel_dp);
217 struct drm_i915_private *dev_priv = dev->dev_private; 300 struct drm_i915_private *dev_priv = dev->dev_private;
218 u32 pp_stat_reg;
219 301
220 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 302 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
221 return (I915_READ(pp_stat_reg) & PP_ON) != 0;
222} 303}
223 304
224static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 305static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
225{ 306{
226 struct drm_device *dev = intel_dp_to_dev(intel_dp); 307 struct drm_device *dev = intel_dp_to_dev(intel_dp);
227 struct drm_i915_private *dev_priv = dev->dev_private; 308 struct drm_i915_private *dev_priv = dev->dev_private;
228 u32 pp_ctrl_reg;
229 309
230 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 310 return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
231 return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
232} 311}
233 312
234static void 313static void
@@ -236,19 +315,15 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
236{ 315{
237 struct drm_device *dev = intel_dp_to_dev(intel_dp); 316 struct drm_device *dev = intel_dp_to_dev(intel_dp);
238 struct drm_i915_private *dev_priv = dev->dev_private; 317 struct drm_i915_private *dev_priv = dev->dev_private;
239 u32 pp_stat_reg, pp_ctrl_reg;
240 318
241 if (!is_edp(intel_dp)) 319 if (!is_edp(intel_dp))
242 return; 320 return;
243 321
244 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
245 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
246
247 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { 322 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
248 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 323 WARN(1, "eDP powered off while attempting aux channel communication.\n");
249 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 324 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
250 I915_READ(pp_stat_reg), 325 I915_READ(_pp_stat_reg(intel_dp)),
251 I915_READ(pp_ctrl_reg)); 326 I915_READ(_pp_ctrl_reg(intel_dp)));
252 } 327 }
253} 328}
254 329
@@ -330,6 +405,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
330 uint32_t status; 405 uint32_t status;
331 int try, precharge, clock = 0; 406 int try, precharge, clock = 0;
332 bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); 407 bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
408 uint32_t timeout;
333 409
334 /* dp aux is extremely sensitive to irq latency, hence request the 410 /* dp aux is extremely sensitive to irq latency, hence request the
335 * lowest possible wakeup latency and so prevent the cpu from going into 411 * lowest possible wakeup latency and so prevent the cpu from going into
@@ -344,6 +420,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
344 else 420 else
345 precharge = 5; 421 precharge = 5;
346 422
423 if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
424 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
425 else
426 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
427
347 intel_aux_display_runtime_get(dev_priv); 428 intel_aux_display_runtime_get(dev_priv);
348 429
349 /* Try to wait for any previous AUX channel activity */ 430 /* Try to wait for any previous AUX channel activity */
@@ -361,6 +442,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
361 goto out; 442 goto out;
362 } 443 }
363 444
445 /* Only 5 data registers! */
446 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
447 ret = -E2BIG;
448 goto out;
449 }
450
364 while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) { 451 while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
365 /* Must try at least 3 times according to DP spec */ 452 /* Must try at least 3 times according to DP spec */
366 for (try = 0; try < 5; try++) { 453 for (try = 0; try < 5; try++) {
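The 20-byte bound above is not arbitrary: each AUX channel exposes five 32-bit data registers (DATA1 through DATA5), so a single transaction can carry at most 5 * 4 = 20 payload bytes; anything larger has to be split into multiple transactions by the caller.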
@@ -373,7 +460,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
373 I915_WRITE(ch_ctl, 460 I915_WRITE(ch_ctl,
374 DP_AUX_CH_CTL_SEND_BUSY | 461 DP_AUX_CH_CTL_SEND_BUSY |
375 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | 462 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
376 DP_AUX_CH_CTL_TIME_OUT_400us | 463 timeout |
377 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 464 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
378 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 465 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
379 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | 466 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
@@ -451,9 +538,10 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
451 int msg_bytes; 538 int msg_bytes;
452 uint8_t ack; 539 uint8_t ack;
453 540
541 if (WARN_ON(send_bytes > 16))
542 return -E2BIG;
543
454 intel_dp_check_edp(intel_dp); 544 intel_dp_check_edp(intel_dp);
455 if (send_bytes > 16)
456 return -1;
457 msg[0] = AUX_NATIVE_WRITE << 4; 545 msg[0] = AUX_NATIVE_WRITE << 4;
458 msg[1] = address >> 8; 546 msg[1] = address >> 8;
459 msg[2] = address & 0xff; 547 msg[2] = address & 0xff;
@@ -494,6 +582,9 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
494 uint8_t ack; 582 uint8_t ack;
495 int ret; 583 int ret;
496 584
585 if (WARN_ON(recv_bytes > 19))
586 return -E2BIG;
587
497 intel_dp_check_edp(intel_dp); 588 intel_dp_check_edp(intel_dp);
498 msg[0] = AUX_NATIVE_READ << 4; 589 msg[0] = AUX_NATIVE_READ << 4;
499 msg[1] = address >> 8; 590 msg[1] = address >> 8;
@@ -538,6 +629,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
538 int reply_bytes; 629 int reply_bytes;
539 int ret; 630 int ret;
540 631
632 ironlake_edp_panel_vdd_on(intel_dp);
541 intel_dp_check_edp(intel_dp); 633 intel_dp_check_edp(intel_dp);
542 /* Set up the command byte */ 634 /* Set up the command byte */
543 if (mode & MODE_I2C_READ) 635 if (mode & MODE_I2C_READ)
@@ -569,13 +661,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
569 break; 661 break;
570 } 662 }
571 663
572 for (retry = 0; retry < 5; retry++) { 664 /*
665 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
666 * required to retry at least seven times upon receiving AUX_DEFER
667 * before giving up the AUX transaction.
668 */
669 for (retry = 0; retry < 7; retry++) {
573 ret = intel_dp_aux_ch(intel_dp, 670 ret = intel_dp_aux_ch(intel_dp,
574 msg, msg_bytes, 671 msg, msg_bytes,
575 reply, reply_bytes); 672 reply, reply_bytes);
576 if (ret < 0) { 673 if (ret < 0) {
577 DRM_DEBUG_KMS("aux_ch failed %d\n", ret); 674 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
578 return ret; 675 goto out;
579 } 676 }
580 677
581 switch (reply[0] & AUX_NATIVE_REPLY_MASK) { 678 switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
@@ -586,7 +683,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
586 break; 683 break;
587 case AUX_NATIVE_REPLY_NACK: 684 case AUX_NATIVE_REPLY_NACK:
588 DRM_DEBUG_KMS("aux_ch native nack\n"); 685 DRM_DEBUG_KMS("aux_ch native nack\n");
589 return -EREMOTEIO; 686 ret = -EREMOTEIO;
687 goto out;
590 case AUX_NATIVE_REPLY_DEFER: 688 case AUX_NATIVE_REPLY_DEFER:
591 /* 689 /*
592 * For now, just give more slack to branch devices. We 690 * For now, just give more slack to branch devices. We
@@ -604,7 +702,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
604 default: 702 default:
605 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 703 DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
606 reply[0]); 704 reply[0]);
607 return -EREMOTEIO; 705 ret = -EREMOTEIO;
706 goto out;
608 } 707 }
609 708
610 switch (reply[0] & AUX_I2C_REPLY_MASK) { 709 switch (reply[0] & AUX_I2C_REPLY_MASK) {
@@ -612,22 +711,29 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
612 if (mode == MODE_I2C_READ) { 711 if (mode == MODE_I2C_READ) {
613 *read_byte = reply[1]; 712 *read_byte = reply[1];
614 } 713 }
615 return reply_bytes - 1; 714 ret = reply_bytes - 1;
715 goto out;
616 case AUX_I2C_REPLY_NACK: 716 case AUX_I2C_REPLY_NACK:
617 DRM_DEBUG_KMS("aux_i2c nack\n"); 717 DRM_DEBUG_KMS("aux_i2c nack\n");
618 return -EREMOTEIO; 718 ret = -EREMOTEIO;
719 goto out;
619 case AUX_I2C_REPLY_DEFER: 720 case AUX_I2C_REPLY_DEFER:
620 DRM_DEBUG_KMS("aux_i2c defer\n"); 721 DRM_DEBUG_KMS("aux_i2c defer\n");
621 udelay(100); 722 udelay(100);
622 break; 723 break;
623 default: 724 default:
624 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); 725 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
625 return -EREMOTEIO; 726 ret = -EREMOTEIO;
727 goto out;
626 } 728 }
627 } 729 }
628 730
629 DRM_ERROR("too many retries, giving up\n"); 731 DRM_ERROR("too many retries, giving up\n");
630 return -EREMOTEIO; 732 ret = -EREMOTEIO;
733
734out:
735 ironlake_edp_panel_vdd_off(intel_dp, false);
736 return ret;
631} 737}
632 738
633static int 739static int
@@ -647,11 +753,9 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
647 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 753 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
648 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 754 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
649 intel_dp->adapter.algo_data = &intel_dp->algo; 755 intel_dp->adapter.algo_data = &intel_dp->algo;
650 intel_dp->adapter.dev.parent = &intel_connector->base.kdev; 756 intel_dp->adapter.dev.parent = intel_connector->base.kdev;
651 757
652 ironlake_edp_panel_vdd_on(intel_dp);
653 ret = i2c_dp_aux_add_bus(&intel_dp->adapter); 758 ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
654 ironlake_edp_panel_vdd_off(intel_dp, false);
655 return ret; 759 return ret;
656} 760}
657 761
@@ -660,41 +764,30 @@ intel_dp_set_clock(struct intel_encoder *encoder,
660 struct intel_crtc_config *pipe_config, int link_bw) 764 struct intel_crtc_config *pipe_config, int link_bw)
661{ 765{
662 struct drm_device *dev = encoder->base.dev; 766 struct drm_device *dev = encoder->base.dev;
767 const struct dp_link_dpll *divisor = NULL;
768 int i, count = 0;
663 769
664 if (IS_G4X(dev)) { 770 if (IS_G4X(dev)) {
665 if (link_bw == DP_LINK_BW_1_62) { 771 divisor = gen4_dpll;
666 pipe_config->dpll.p1 = 2; 772 count = ARRAY_SIZE(gen4_dpll);
667 pipe_config->dpll.p2 = 10;
668 pipe_config->dpll.n = 2;
669 pipe_config->dpll.m1 = 23;
670 pipe_config->dpll.m2 = 8;
671 } else {
672 pipe_config->dpll.p1 = 1;
673 pipe_config->dpll.p2 = 10;
674 pipe_config->dpll.n = 1;
675 pipe_config->dpll.m1 = 14;
676 pipe_config->dpll.m2 = 2;
677 }
678 pipe_config->clock_set = true;
679 } else if (IS_HASWELL(dev)) { 773 } else if (IS_HASWELL(dev)) {
680 /* Haswell has special-purpose DP DDI clocks. */ 774 /* Haswell has special-purpose DP DDI clocks. */
681 } else if (HAS_PCH_SPLIT(dev)) { 775 } else if (HAS_PCH_SPLIT(dev)) {
682 if (link_bw == DP_LINK_BW_1_62) { 776 divisor = pch_dpll;
683 pipe_config->dpll.n = 1; 777 count = ARRAY_SIZE(pch_dpll);
684 pipe_config->dpll.p1 = 2;
685 pipe_config->dpll.p2 = 10;
686 pipe_config->dpll.m1 = 12;
687 pipe_config->dpll.m2 = 9;
688 } else {
689 pipe_config->dpll.n = 2;
690 pipe_config->dpll.p1 = 1;
691 pipe_config->dpll.p2 = 10;
692 pipe_config->dpll.m1 = 14;
693 pipe_config->dpll.m2 = 8;
694 }
695 pipe_config->clock_set = true;
696 } else if (IS_VALLEYVIEW(dev)) { 778 } else if (IS_VALLEYVIEW(dev)) {
697 /* FIXME: Need to figure out optimized DP clocks for vlv. */ 779 divisor = vlv_dpll;
780 count = ARRAY_SIZE(vlv_dpll);
781 }
782
783 if (divisor && count) {
784 for (i = 0; i < count; i++) {
785 if (link_bw == divisor[i].link_bw) {
786 pipe_config->dpll = divisor[i].dpll;
787 pipe_config->clock_set = true;
788 break;
789 }
790 }
698 } 791 }
699} 792}
700 793
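Worth noting as a pattern: replacing three open-coded if/else ladders with const lookup tables keeps all the divider data in one place, and supporting a hypothetical extra rate becomes one more table row, e.g. { DP_LINK_BW_5_4, { ... } } with values to be determined, rather than another branch.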
@@ -737,19 +830,22 @@ intel_dp_compute_config(struct intel_encoder *encoder,
737 830
738 DRM_DEBUG_KMS("DP link computation with max lane count %i " 831 DRM_DEBUG_KMS("DP link computation with max lane count %i "
739 "max bw %02x pixel clock %iKHz\n", 832 "max bw %02x pixel clock %iKHz\n",
740 max_lane_count, bws[max_clock], adjusted_mode->clock); 833 max_lane_count, bws[max_clock],
834 adjusted_mode->crtc_clock);
741 835
742 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 836 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
743 * bpc in between. */ 837 * bpc in between. */
744 bpp = pipe_config->pipe_bpp; 838 bpp = pipe_config->pipe_bpp;
745 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) { 839 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
840 dev_priv->vbt.edp_bpp < bpp) {
746 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", 841 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
747 dev_priv->vbt.edp_bpp); 842 dev_priv->vbt.edp_bpp);
748 bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp); 843 bpp = dev_priv->vbt.edp_bpp;
749 } 844 }
750 845
751 for (; bpp >= 6*3; bpp -= 2*3) { 846 for (; bpp >= 6*3; bpp -= 2*3) {
752 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); 847 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
848 bpp);
753 849
754 for (clock = 0; clock <= max_clock; clock++) { 850 for (clock = 0; clock <= max_clock; clock++) {
755 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 851 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
@@ -794,7 +890,8 @@ found:
794 mode_rate, link_avail); 890 mode_rate, link_avail);
795 891
796 intel_link_compute_m_n(bpp, lane_count, 892 intel_link_compute_m_n(bpp, lane_count,
797 adjusted_mode->clock, pipe_config->port_clock, 893 adjusted_mode->crtc_clock,
894 pipe_config->port_clock,
798 &pipe_config->dp_m_n); 895 &pipe_config->dp_m_n);
799 896
800 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 897 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -802,21 +899,6 @@ found:
802 return true; 899 return true;
803} 900}
804 901
805void intel_dp_init_link_config(struct intel_dp *intel_dp)
806{
807 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
808 intel_dp->link_configuration[0] = intel_dp->link_bw;
809 intel_dp->link_configuration[1] = intel_dp->lane_count;
810 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
811 /*
812 * Check for DPCD version > 1.1 and enhanced framing support
813 */
814 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
815 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
816 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
817 }
818}
819
820static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp) 902static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
821{ 903{
822 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 904 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -889,8 +971,6 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
889 intel_write_eld(&encoder->base, adjusted_mode); 971 intel_write_eld(&encoder->base, adjusted_mode);
890 } 972 }
891 973
892 intel_dp_init_link_config(intel_dp);
893
894 /* Split out the IBX/CPU vs CPT settings */ 974 /* Split out the IBX/CPU vs CPT settings */
895 975
896 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 976 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
@@ -900,7 +980,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
900 intel_dp->DP |= DP_SYNC_VS_HIGH; 980 intel_dp->DP |= DP_SYNC_VS_HIGH;
901 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 981 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
902 982
903 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 983 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
904 intel_dp->DP |= DP_ENHANCED_FRAMING; 984 intel_dp->DP |= DP_ENHANCED_FRAMING;
905 985
906 intel_dp->DP |= crtc->pipe << 29; 986 intel_dp->DP |= crtc->pipe << 29;
@@ -914,7 +994,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
914 intel_dp->DP |= DP_SYNC_VS_HIGH; 994 intel_dp->DP |= DP_SYNC_VS_HIGH;
915 intel_dp->DP |= DP_LINK_TRAIN_OFF; 995 intel_dp->DP |= DP_LINK_TRAIN_OFF;
916 996
917 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 997 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
918 intel_dp->DP |= DP_ENHANCED_FRAMING; 998 intel_dp->DP |= DP_ENHANCED_FRAMING;
919 999
920 if (crtc->pipe == 1) 1000 if (crtc->pipe == 1)
@@ -944,8 +1024,8 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
944 struct drm_i915_private *dev_priv = dev->dev_private; 1024 struct drm_i915_private *dev_priv = dev->dev_private;
945 u32 pp_stat_reg, pp_ctrl_reg; 1025 u32 pp_stat_reg, pp_ctrl_reg;
946 1026
947 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 1027 pp_stat_reg = _pp_stat_reg(intel_dp);
948 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1028 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
949 1029
950 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", 1030 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
951 mask, value, 1031 mask, value,
@@ -987,11 +1067,8 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
987 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1067 struct drm_device *dev = intel_dp_to_dev(intel_dp);
988 struct drm_i915_private *dev_priv = dev->dev_private; 1068 struct drm_i915_private *dev_priv = dev->dev_private;
989 u32 control; 1069 u32 control;
990 u32 pp_ctrl_reg;
991
992 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
993 control = I915_READ(pp_ctrl_reg);
994 1070
1071 control = I915_READ(_pp_ctrl_reg(intel_dp));
995 control &= ~PANEL_UNLOCK_MASK; 1072 control &= ~PANEL_UNLOCK_MASK;
996 control |= PANEL_UNLOCK_REGS; 1073 control |= PANEL_UNLOCK_REGS;
997 return control; 1074 return control;
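
The _pp_stat_reg()/_pp_ctrl_reg() helpers used throughout these hunks replace the repeated IS_VALLEYVIEW() ternaries. A sketch of their likely shape, assuming the per-pipe VLV_PIPE_PP_* macros and the vlv_power_sequencer_pipe() helper that appear later in this patch:

    static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
    {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        /* Fixed PCH instance, or the per-pipe instance on VLV. */
        if (HAS_PCH_SPLIT(dev))
            return PCH_PP_CONTROL;
        else
            return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
    }

_pp_stat_reg() would follow the same pattern with PCH_PP_STATUS and VLV_PIPE_PP_STATUS(pipe).
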
@@ -1006,17 +1083,16 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
1006 1083
1007 if (!is_edp(intel_dp)) 1084 if (!is_edp(intel_dp))
1008 return; 1085 return;
1009 DRM_DEBUG_KMS("Turn eDP VDD on\n");
1010 1086
1011 WARN(intel_dp->want_panel_vdd, 1087 WARN(intel_dp->want_panel_vdd,
1012 "eDP VDD already requested on\n"); 1088 "eDP VDD already requested on\n");
1013 1089
1014 intel_dp->want_panel_vdd = true; 1090 intel_dp->want_panel_vdd = true;
1015 1091
1016 if (ironlake_edp_have_panel_vdd(intel_dp)) { 1092 if (ironlake_edp_have_panel_vdd(intel_dp))
1017 DRM_DEBUG_KMS("eDP VDD already on\n");
1018 return; 1093 return;
1019 } 1094
1095 DRM_DEBUG_KMS("Turning eDP VDD on\n");
1020 1096
1021 if (!ironlake_edp_have_panel_power(intel_dp)) 1097 if (!ironlake_edp_have_panel_power(intel_dp))
1022 ironlake_wait_panel_power_cycle(intel_dp); 1098 ironlake_wait_panel_power_cycle(intel_dp);
@@ -1024,8 +1100,8 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
1024 pp = ironlake_get_pp_control(intel_dp); 1100 pp = ironlake_get_pp_control(intel_dp);
1025 pp |= EDP_FORCE_VDD; 1101 pp |= EDP_FORCE_VDD;
1026 1102
1027 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 1103 pp_stat_reg = _pp_stat_reg(intel_dp);
1028 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1104 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1029 1105
1030 I915_WRITE(pp_ctrl_reg, pp); 1106 I915_WRITE(pp_ctrl_reg, pp);
1031 POSTING_READ(pp_ctrl_reg); 1107 POSTING_READ(pp_ctrl_reg);
@@ -1050,11 +1126,13 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
1050 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 1126 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
1051 1127
1052 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1128 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
1129 DRM_DEBUG_KMS("Turning eDP VDD off\n");
1130
1053 pp = ironlake_get_pp_control(intel_dp); 1131 pp = ironlake_get_pp_control(intel_dp);
1054 pp &= ~EDP_FORCE_VDD; 1132 pp &= ~EDP_FORCE_VDD;
1055 1133
1056 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 1134 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1057 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1135 pp_stat_reg = _pp_stat_reg(intel_dp);
1058 1136
1059 I915_WRITE(pp_ctrl_reg, pp); 1137 I915_WRITE(pp_ctrl_reg, pp);
1060 POSTING_READ(pp_ctrl_reg); 1138 POSTING_READ(pp_ctrl_reg);
@@ -1082,7 +1160,6 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1082 if (!is_edp(intel_dp)) 1160 if (!is_edp(intel_dp))
1083 return; 1161 return;
1084 1162
1085 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
1086 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); 1163 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
1087 1164
1088 intel_dp->want_panel_vdd = false; 1165 intel_dp->want_panel_vdd = false;
@@ -1119,20 +1196,19 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1119 1196
1120 ironlake_wait_panel_power_cycle(intel_dp); 1197 ironlake_wait_panel_power_cycle(intel_dp);
1121 1198
1199 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1122 pp = ironlake_get_pp_control(intel_dp); 1200 pp = ironlake_get_pp_control(intel_dp);
1123 if (IS_GEN5(dev)) { 1201 if (IS_GEN5(dev)) {
1124 /* ILK workaround: disable reset around power sequence */ 1202 /* ILK workaround: disable reset around power sequence */
1125 pp &= ~PANEL_POWER_RESET; 1203 pp &= ~PANEL_POWER_RESET;
1126 I915_WRITE(PCH_PP_CONTROL, pp); 1204 I915_WRITE(pp_ctrl_reg, pp);
1127 POSTING_READ(PCH_PP_CONTROL); 1205 POSTING_READ(pp_ctrl_reg);
1128 } 1206 }
1129 1207
1130 pp |= POWER_TARGET_ON; 1208 pp |= POWER_TARGET_ON;
1131 if (!IS_GEN5(dev)) 1209 if (!IS_GEN5(dev))
1132 pp |= PANEL_POWER_RESET; 1210 pp |= PANEL_POWER_RESET;
1133 1211
1134 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
1135
1136 I915_WRITE(pp_ctrl_reg, pp); 1212 I915_WRITE(pp_ctrl_reg, pp);
1137 POSTING_READ(pp_ctrl_reg); 1213 POSTING_READ(pp_ctrl_reg);
1138 1214
@@ -1140,8 +1216,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1140 1216
1141 if (IS_GEN5(dev)) { 1217 if (IS_GEN5(dev)) {
1142 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1218 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1143 I915_WRITE(PCH_PP_CONTROL, pp); 1219 I915_WRITE(pp_ctrl_reg, pp);
1144 POSTING_READ(PCH_PP_CONTROL); 1220 POSTING_READ(pp_ctrl_reg);
1145 } 1221 }
1146} 1222}
1147 1223
@@ -1164,7 +1240,7 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1164 * panels get very unhappy and cease to work. */ 1240 * panels get very unhappy and cease to work. */
1165 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); 1241 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
1166 1242
1167 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1243 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1168 1244
1169 I915_WRITE(pp_ctrl_reg, pp); 1245 I915_WRITE(pp_ctrl_reg, pp);
1170 POSTING_READ(pp_ctrl_reg); 1246 POSTING_READ(pp_ctrl_reg);
@@ -1179,7 +1255,6 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1179 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1255 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1180 struct drm_device *dev = intel_dig_port->base.base.dev; 1256 struct drm_device *dev = intel_dig_port->base.base.dev;
1181 struct drm_i915_private *dev_priv = dev->dev_private; 1257 struct drm_i915_private *dev_priv = dev->dev_private;
1182 int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
1183 u32 pp; 1258 u32 pp;
1184 u32 pp_ctrl_reg; 1259 u32 pp_ctrl_reg;
1185 1260
@@ -1197,12 +1272,12 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1197 pp = ironlake_get_pp_control(intel_dp); 1272 pp = ironlake_get_pp_control(intel_dp);
1198 pp |= EDP_BLC_ENABLE; 1273 pp |= EDP_BLC_ENABLE;
1199 1274
1200 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1275 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1201 1276
1202 I915_WRITE(pp_ctrl_reg, pp); 1277 I915_WRITE(pp_ctrl_reg, pp);
1203 POSTING_READ(pp_ctrl_reg); 1278 POSTING_READ(pp_ctrl_reg);
1204 1279
1205 intel_panel_enable_backlight(dev, pipe); 1280 intel_panel_enable_backlight(intel_dp->attached_connector);
1206} 1281}
1207 1282
1208void ironlake_edp_backlight_off(struct intel_dp *intel_dp) 1283void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1215,13 +1290,13 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1215 if (!is_edp(intel_dp)) 1290 if (!is_edp(intel_dp))
1216 return; 1291 return;
1217 1292
1218 intel_panel_disable_backlight(dev); 1293 intel_panel_disable_backlight(intel_dp->attached_connector);
1219 1294
1220 DRM_DEBUG_KMS("\n"); 1295 DRM_DEBUG_KMS("\n");
1221 pp = ironlake_get_pp_control(intel_dp); 1296 pp = ironlake_get_pp_control(intel_dp);
1222 pp &= ~EDP_BLC_ENABLE; 1297 pp &= ~EDP_BLC_ENABLE;
1223 1298
1224 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 1299 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1225 1300
1226 I915_WRITE(pp_ctrl_reg, pp); 1301 I915_WRITE(pp_ctrl_reg, pp);
1227 POSTING_READ(pp_ctrl_reg); 1302 POSTING_READ(pp_ctrl_reg);
@@ -1368,6 +1443,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1368 struct drm_i915_private *dev_priv = dev->dev_private; 1443 struct drm_i915_private *dev_priv = dev->dev_private;
1369 enum port port = dp_to_dig_port(intel_dp)->port; 1444 enum port port = dp_to_dig_port(intel_dp)->port;
1370 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1445 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1446 int dotclock;
1371 1447
1372 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { 1448 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
1373 tmp = I915_READ(intel_dp->output_reg); 1449 tmp = I915_READ(intel_dp->output_reg);
@@ -1395,13 +1471,25 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1395 1471
1396 pipe_config->adjusted_mode.flags |= flags; 1472 pipe_config->adjusted_mode.flags |= flags;
1397 1473
1398 if (dp_to_dig_port(intel_dp)->port == PORT_A) { 1474 pipe_config->has_dp_encoder = true;
1475
1476 intel_dp_get_m_n(crtc, pipe_config);
1477
1478 if (port == PORT_A) {
1399 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ) 1479 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
1400 pipe_config->port_clock = 162000; 1480 pipe_config->port_clock = 162000;
1401 else 1481 else
1402 pipe_config->port_clock = 270000; 1482 pipe_config->port_clock = 270000;
1403 } 1483 }
1404 1484
1485 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
1486 &pipe_config->dp_m_n);
1487
1488 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
1489 ironlake_check_encoder_dotclock(pipe_config, dotclock);
1490
1491 pipe_config->adjusted_mode.crtc_clock = dotclock;
1492
1405 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && 1493 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
1406 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { 1494 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
1407 /* 1495 /*
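
The dotclock read back above inverts the link M/N ratio that was programmed at mode-set time. A sketch of intel_dotclock_calculate(), assuming the helper added elsewhere in this series:

    static int intel_dotclock_calculate(int link_freq,
                                        const struct intel_link_m_n *m_n)
    {
        /* pixel clock = link clock * M / N */
        if (!m_n->link_n)
            return 0;
        return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
    }
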
@@ -1423,20 +1511,21 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1423 } 1511 }
1424} 1512}
1425 1513
1426static bool is_edp_psr(struct intel_dp *intel_dp) 1514static bool is_edp_psr(struct drm_device *dev)
1427{ 1515{
1428 return is_edp(intel_dp) && 1516 struct drm_i915_private *dev_priv = dev->dev_private;
1429 intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; 1517
1518 return dev_priv->psr.sink_support;
1430} 1519}
1431 1520
1432static bool intel_edp_is_psr_enabled(struct drm_device *dev) 1521static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1433{ 1522{
1434 struct drm_i915_private *dev_priv = dev->dev_private; 1523 struct drm_i915_private *dev_priv = dev->dev_private;
1435 1524
1436 if (!IS_HASWELL(dev)) 1525 if (!HAS_PSR(dev))
1437 return false; 1526 return false;
1438 1527
1439 return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 1528 return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1440} 1529}
1441 1530
1442static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp, 1531static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
@@ -1486,7 +1575,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1486 intel_edp_psr_write_vsc(intel_dp, &psr_vsc); 1575 intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
1487 1576
1488 /* Avoid continuous PSR exit by masking memup and hpd */ 1577 /* Avoid continuous PSR exit by masking memup and hpd */
1489 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP | 1578 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
1490 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); 1579 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
1491 1580
1492 intel_dp->psr_setup_done = true; 1581 intel_dp->psr_setup_done = true;
@@ -1511,9 +1600,9 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1511 DP_PSR_MAIN_LINK_ACTIVE); 1600 DP_PSR_MAIN_LINK_ACTIVE);
1512 1601
1513 /* Setup AUX registers */ 1602 /* Setup AUX registers */
1514 I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND); 1603 I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
1515 I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION); 1604 I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
1516 I915_WRITE(EDP_PSR_AUX_CTL, 1605 I915_WRITE(EDP_PSR_AUX_CTL(dev),
1517 DP_AUX_CH_CTL_TIME_OUT_400us | 1606 DP_AUX_CH_CTL_TIME_OUT_400us |
1518 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 1607 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1519 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 1608 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -1527,6 +1616,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1527 uint32_t max_sleep_time = 0x1f; 1616 uint32_t max_sleep_time = 0x1f;
1528 uint32_t idle_frames = 1; 1617 uint32_t idle_frames = 1;
1529 uint32_t val = 0x0; 1618 uint32_t val = 0x0;
1619 const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
1530 1620
1531 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { 1621 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
1532 val |= EDP_PSR_LINK_STANDBY; 1622 val |= EDP_PSR_LINK_STANDBY;
@@ -1536,8 +1626,8 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1536 } else 1626 } else
1537 val |= EDP_PSR_LINK_DISABLE; 1627 val |= EDP_PSR_LINK_DISABLE;
1538 1628
1539 I915_WRITE(EDP_PSR_CTL, val | 1629 I915_WRITE(EDP_PSR_CTL(dev), val |
1540 EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES | 1630 IS_BROADWELL(dev) ? 0 : link_entry_time |
1541 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | 1631 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1542 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | 1632 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
1543 EDP_PSR_ENABLE); 1633 EDP_PSR_ENABLE);
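
One subtlety in the write just above: in C the conditional operator binds more loosely than bitwise OR, so the expression parses as (val | IS_BROADWELL(dev)) ? 0 : (link_entry_time | ...), not as a conditionally skipped field. If the intent is only to omit the link-entry-time bits on Broadwell, the grouping would need parentheses, roughly:

    I915_WRITE(EDP_PSR_CTL(dev),
               val |
               (IS_BROADWELL(dev) ? 0 : link_entry_time) |
               max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
               idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
               EDP_PSR_ENABLE);
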
@@ -1553,42 +1643,33 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1553 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj; 1643 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
1554 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 1644 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1555 1645
1556 if (!IS_HASWELL(dev)) { 1646 dev_priv->psr.source_ok = false;
1647
1648 if (!HAS_PSR(dev)) {
1557 DRM_DEBUG_KMS("PSR not supported on this platform\n"); 1649 DRM_DEBUG_KMS("PSR not supported on this platform\n");
1558 dev_priv->no_psr_reason = PSR_NO_SOURCE;
1559 return false; 1650 return false;
1560 } 1651 }
1561 1652
1562 if ((intel_encoder->type != INTEL_OUTPUT_EDP) || 1653 if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
1563 (dig_port->port != PORT_A)) { 1654 (dig_port->port != PORT_A)) {
1564 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); 1655 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1565 dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
1566 return false;
1567 }
1568
1569 if (!is_edp_psr(intel_dp)) {
1570 DRM_DEBUG_KMS("PSR not supported by this panel\n");
1571 dev_priv->no_psr_reason = PSR_NO_SINK;
1572 return false; 1656 return false;
1573 } 1657 }
1574 1658
1575 if (!i915_enable_psr) { 1659 if (!i915_enable_psr) {
1576 DRM_DEBUG_KMS("PSR disable by flag\n"); 1660 DRM_DEBUG_KMS("PSR disable by flag\n");
1577 dev_priv->no_psr_reason = PSR_MODULE_PARAM;
1578 return false; 1661 return false;
1579 } 1662 }
1580 1663
1581 crtc = dig_port->base.base.crtc; 1664 crtc = dig_port->base.base.crtc;
1582 if (crtc == NULL) { 1665 if (crtc == NULL) {
1583 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1666 DRM_DEBUG_KMS("crtc not active for PSR\n");
1584 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
1585 return false; 1667 return false;
1586 } 1668 }
1587 1669
1588 intel_crtc = to_intel_crtc(crtc); 1670 intel_crtc = to_intel_crtc(crtc);
1589 if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) { 1671 if (!intel_crtc_active(crtc)) {
1590 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1672 DRM_DEBUG_KMS("crtc not active for PSR\n");
1591 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
1592 return false; 1673 return false;
1593 } 1674 }
1594 1675
@@ -1596,29 +1677,26 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1596 if (obj->tiling_mode != I915_TILING_X || 1677 if (obj->tiling_mode != I915_TILING_X ||
1597 obj->fence_reg == I915_FENCE_REG_NONE) { 1678 obj->fence_reg == I915_FENCE_REG_NONE) {
1598 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); 1679 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
1599 dev_priv->no_psr_reason = PSR_NOT_TILED;
1600 return false; 1680 return false;
1601 } 1681 }
1602 1682
1603 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { 1683 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
1604 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); 1684 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
1605 dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
1606 return false; 1685 return false;
1607 } 1686 }
1608 1687
1609 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & 1688 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1610 S3D_ENABLE) { 1689 S3D_ENABLE) {
1611 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); 1690 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1612 dev_priv->no_psr_reason = PSR_S3D_ENABLED;
1613 return false; 1691 return false;
1614 } 1692 }
1615 1693
1616 if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { 1694 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
1617 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); 1695 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
1618 dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
1619 return false; 1696 return false;
1620 } 1697 }
1621 1698
1699 dev_priv->psr.source_ok = true;
1622 return true; 1700 return true;
1623} 1701}
1624 1702
@@ -1657,10 +1735,11 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
1657 if (!intel_edp_is_psr_enabled(dev)) 1735 if (!intel_edp_is_psr_enabled(dev))
1658 return; 1736 return;
1659 1737
1660 I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); 1738 I915_WRITE(EDP_PSR_CTL(dev),
1739 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
1661 1740
1662 /* Wait till PSR is idle */ 1741 /* Wait till PSR is idle */
1663 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) & 1742 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
1664 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) 1743 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
1665 DRM_ERROR("Timed out waiting for PSR Idle State\n"); 1744 DRM_ERROR("Timed out waiting for PSR Idle State\n");
1666} 1745}
@@ -1674,7 +1753,7 @@ void intel_edp_psr_update(struct drm_device *dev)
1674 if (encoder->type == INTEL_OUTPUT_EDP) { 1753 if (encoder->type == INTEL_OUTPUT_EDP) {
1675 intel_dp = enc_to_intel_dp(&encoder->base); 1754 intel_dp = enc_to_intel_dp(&encoder->base);
1676 1755
1677 if (!is_edp_psr(intel_dp)) 1756 if (!is_edp_psr(dev))
1678 return; 1757 return;
1679 1758
1680 if (!intel_edp_psr_match_conditions(intel_dp)) 1759 if (!intel_edp_psr_match_conditions(intel_dp))
@@ -1733,14 +1812,24 @@ static void intel_enable_dp(struct intel_encoder *encoder)
1733 ironlake_edp_panel_vdd_off(intel_dp, true); 1812 ironlake_edp_panel_vdd_off(intel_dp, true);
1734 intel_dp_complete_link_train(intel_dp); 1813 intel_dp_complete_link_train(intel_dp);
1735 intel_dp_stop_link_train(intel_dp); 1814 intel_dp_stop_link_train(intel_dp);
1815}
1816
1817static void g4x_enable_dp(struct intel_encoder *encoder)
1818{
1819 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1820
1821 intel_enable_dp(encoder);
1736 ironlake_edp_backlight_on(intel_dp); 1822 ironlake_edp_backlight_on(intel_dp);
1737} 1823}
1738 1824
1739static void vlv_enable_dp(struct intel_encoder *encoder) 1825static void vlv_enable_dp(struct intel_encoder *encoder)
1740{ 1826{
1827 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1828
1829 ironlake_edp_backlight_on(intel_dp);
1741} 1830}
1742 1831
1743static void intel_pre_enable_dp(struct intel_encoder *encoder) 1832static void g4x_pre_enable_dp(struct intel_encoder *encoder)
1744{ 1833{
1745 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1834 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1746 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 1835 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
@@ -1758,53 +1847,59 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
1758 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 1847 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1759 int port = vlv_dport_to_channel(dport); 1848 int port = vlv_dport_to_channel(dport);
1760 int pipe = intel_crtc->pipe; 1849 int pipe = intel_crtc->pipe;
1850 struct edp_power_seq power_seq;
1761 u32 val; 1851 u32 val;
1762 1852
1763 mutex_lock(&dev_priv->dpio_lock); 1853 mutex_lock(&dev_priv->dpio_lock);
1764 1854
1765 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); 1855 val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
1766 val = 0; 1856 val = 0;
1767 if (pipe) 1857 if (pipe)
1768 val |= (1<<21); 1858 val |= (1<<21);
1769 else 1859 else
1770 val &= ~(1<<21); 1860 val &= ~(1<<21);
1771 val |= 0x001000c4; 1861 val |= 0x001000c4;
1772 vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val); 1862 vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
1773 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018); 1863 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
1774 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888); 1864 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
1775 1865
1776 mutex_unlock(&dev_priv->dpio_lock); 1866 mutex_unlock(&dev_priv->dpio_lock);
1777 1867
1868 /* init power sequencer on this pipe and port */
1869 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
1870 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
1871 &power_seq);
1872
1778 intel_enable_dp(encoder); 1873 intel_enable_dp(encoder);
1779 1874
1780 vlv_wait_port_ready(dev_priv, port); 1875 vlv_wait_port_ready(dev_priv, port);
1781} 1876}
1782 1877
1783static void intel_dp_pre_pll_enable(struct intel_encoder *encoder) 1878static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
1784{ 1879{
1785 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1880 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1786 struct drm_device *dev = encoder->base.dev; 1881 struct drm_device *dev = encoder->base.dev;
1787 struct drm_i915_private *dev_priv = dev->dev_private; 1882 struct drm_i915_private *dev_priv = dev->dev_private;
1883 struct intel_crtc *intel_crtc =
1884 to_intel_crtc(encoder->base.crtc);
1788 int port = vlv_dport_to_channel(dport); 1885 int port = vlv_dport_to_channel(dport);
1789 1886 int pipe = intel_crtc->pipe;
1790 if (!IS_VALLEYVIEW(dev))
1791 return;
1792 1887
1793 /* Program Tx lane resets to default */ 1888 /* Program Tx lane resets to default */
1794 mutex_lock(&dev_priv->dpio_lock); 1889 mutex_lock(&dev_priv->dpio_lock);
1795 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 1890 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
1796 DPIO_PCS_TX_LANE2_RESET | 1891 DPIO_PCS_TX_LANE2_RESET |
1797 DPIO_PCS_TX_LANE1_RESET); 1892 DPIO_PCS_TX_LANE1_RESET);
1798 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 1893 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
1799 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | 1894 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1800 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | 1895 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1801 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | 1896 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1802 DPIO_PCS_CLK_SOFT_RESET); 1897 DPIO_PCS_CLK_SOFT_RESET);
1803 1898
1804 /* Fix up inter-pair skew failure */ 1899 /* Fix up inter-pair skew failure */
1805 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00); 1900 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
1806 vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500); 1901 vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
1807 vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000); 1902 vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
1808 mutex_unlock(&dev_priv->dpio_lock); 1903 mutex_unlock(&dev_priv->dpio_lock);
1809} 1904}
1810 1905
@@ -1869,7 +1964,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
1869 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1964 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1870 enum port port = dp_to_dig_port(intel_dp)->port; 1965 enum port port = dp_to_dig_port(intel_dp)->port;
1871 1966
1872 if (IS_VALLEYVIEW(dev)) 1967 if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
1873 return DP_TRAIN_VOLTAGE_SWING_1200; 1968 return DP_TRAIN_VOLTAGE_SWING_1200;
1874 else if (IS_GEN7(dev) && port == PORT_A) 1969 else if (IS_GEN7(dev) && port == PORT_A)
1875 return DP_TRAIN_VOLTAGE_SWING_800; 1970 return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1885,7 +1980,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1885 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1980 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1886 enum port port = dp_to_dig_port(intel_dp)->port; 1981 enum port port = dp_to_dig_port(intel_dp)->port;
1887 1982
1888 if (HAS_DDI(dev)) { 1983 if (IS_BROADWELL(dev)) {
1984 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1985 case DP_TRAIN_VOLTAGE_SWING_400:
1986 case DP_TRAIN_VOLTAGE_SWING_600:
1987 return DP_TRAIN_PRE_EMPHASIS_6;
1988 case DP_TRAIN_VOLTAGE_SWING_800:
1989 return DP_TRAIN_PRE_EMPHASIS_3_5;
1990 case DP_TRAIN_VOLTAGE_SWING_1200:
1991 default:
1992 return DP_TRAIN_PRE_EMPHASIS_0;
1993 }
1994 } else if (IS_HASWELL(dev)) {
1889 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1995 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1890 case DP_TRAIN_VOLTAGE_SWING_400: 1996 case DP_TRAIN_VOLTAGE_SWING_400:
1891 return DP_TRAIN_PRE_EMPHASIS_9_5; 1997 return DP_TRAIN_PRE_EMPHASIS_9_5;
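
Both the new Broadwell table and the Haswell branch that now follows share one shape: the higher the requested voltage swing, the lower the maximum pre-emphasis, reflecting the cap the DP spec puts on the combined drive level. For Broadwell, from the switch above:

    /* Illustration only, summarising the BDW maxima:
     *   400/600 mV swing -> up to 6 dB pre-emphasis
     *   800 mV swing     -> up to 3.5 dB
     *   1200 mV swing    -> 0 dB
     */
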
@@ -1939,10 +2045,13 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
1939 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2045 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1940 struct drm_i915_private *dev_priv = dev->dev_private; 2046 struct drm_i915_private *dev_priv = dev->dev_private;
1941 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 2047 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2048 struct intel_crtc *intel_crtc =
2049 to_intel_crtc(dport->base.base.crtc);
1942 unsigned long demph_reg_value, preemph_reg_value, 2050 unsigned long demph_reg_value, preemph_reg_value,
1943 uniqtranscale_reg_value; 2051 uniqtranscale_reg_value;
1944 uint8_t train_set = intel_dp->train_set[0]; 2052 uint8_t train_set = intel_dp->train_set[0];
1945 int port = vlv_dport_to_channel(dport); 2053 int port = vlv_dport_to_channel(dport);
2054 int pipe = intel_crtc->pipe;
1946 2055
1947 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 2056 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1948 case DP_TRAIN_PRE_EMPHASIS_0: 2057 case DP_TRAIN_PRE_EMPHASIS_0:
@@ -2018,21 +2127,22 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2018 } 2127 }
2019 2128
2020 mutex_lock(&dev_priv->dpio_lock); 2129 mutex_lock(&dev_priv->dpio_lock);
2021 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000); 2130 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
2022 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value); 2131 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
2023 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port), 2132 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
2024 uniqtranscale_reg_value); 2133 uniqtranscale_reg_value);
2025 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040); 2134 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
2026 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000); 2135 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
2027 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value); 2136 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
2028 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000); 2137 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
2029 mutex_unlock(&dev_priv->dpio_lock); 2138 mutex_unlock(&dev_priv->dpio_lock);
2030 2139
2031 return 0; 2140 return 0;
2032} 2141}
2033 2142
2034static void 2143static void
2035intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 2144intel_get_adjust_train(struct intel_dp *intel_dp,
2145 const uint8_t link_status[DP_LINK_STATUS_SIZE])
2036{ 2146{
2037 uint8_t v = 0; 2147 uint8_t v = 0;
2038 uint8_t p = 0; 2148 uint8_t p = 0;
@@ -2193,6 +2303,41 @@ intel_hsw_signal_levels(uint8_t train_set)
2193 } 2303 }
2194} 2304}
2195 2305
2306static uint32_t
2307intel_bdw_signal_levels(uint8_t train_set)
2308{
2309 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2310 DP_TRAIN_PRE_EMPHASIS_MASK);
2311 switch (signal_levels) {
2312 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2313 return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
2314 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2315 return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
2316 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2317 return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
2318
2319 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2320 return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
2321 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2322 return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
2323 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2324 return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
2325
2326 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2327 return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
2328 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2329 return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
2330
2331 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2332 return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
2333
2334 default:
2335 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2336 "0x%x\n", signal_levels);
2337 return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
2338 }
2339}
2340
2196/* Properly updates "DP" with the correct signal levels. */ 2341/* Properly updates "DP" with the correct signal levels. */
2197static void 2342static void
2198intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) 2343intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -2203,7 +2348,10 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2203 uint32_t signal_levels, mask; 2348 uint32_t signal_levels, mask;
2204 uint8_t train_set = intel_dp->train_set[0]; 2349 uint8_t train_set = intel_dp->train_set[0];
2205 2350
2206 if (HAS_DDI(dev)) { 2351 if (IS_BROADWELL(dev)) {
2352 signal_levels = intel_bdw_signal_levels(train_set);
2353 mask = DDI_BUF_EMP_MASK;
2354 } else if (IS_HASWELL(dev)) {
2207 signal_levels = intel_hsw_signal_levels(train_set); 2355 signal_levels = intel_hsw_signal_levels(train_set);
2208 mask = DDI_BUF_EMP_MASK; 2356 mask = DDI_BUF_EMP_MASK;
2209 } else if (IS_VALLEYVIEW(dev)) { 2357 } else if (IS_VALLEYVIEW(dev)) {
@@ -2227,14 +2375,15 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2227 2375
2228static bool 2376static bool
2229intel_dp_set_link_train(struct intel_dp *intel_dp, 2377intel_dp_set_link_train(struct intel_dp *intel_dp,
2230 uint32_t dp_reg_value, 2378 uint32_t *DP,
2231 uint8_t dp_train_pat) 2379 uint8_t dp_train_pat)
2232{ 2380{
2233 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2381 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2234 struct drm_device *dev = intel_dig_port->base.base.dev; 2382 struct drm_device *dev = intel_dig_port->base.base.dev;
2235 struct drm_i915_private *dev_priv = dev->dev_private; 2383 struct drm_i915_private *dev_priv = dev->dev_private;
2236 enum port port = intel_dig_port->port; 2384 enum port port = intel_dig_port->port;
2237 int ret; 2385 uint8_t buf[sizeof(intel_dp->train_set) + 1];
2386 int ret, len;
2238 2387
2239 if (HAS_DDI(dev)) { 2388 if (HAS_DDI(dev)) {
2240 uint32_t temp = I915_READ(DP_TP_CTL(port)); 2389 uint32_t temp = I915_READ(DP_TP_CTL(port));
@@ -2263,62 +2412,93 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
2263 I915_WRITE(DP_TP_CTL(port), temp); 2412 I915_WRITE(DP_TP_CTL(port), temp);
2264 2413
2265 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) { 2414 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2266 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; 2415 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2267 2416
2268 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 2417 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2269 case DP_TRAINING_PATTERN_DISABLE: 2418 case DP_TRAINING_PATTERN_DISABLE:
2270 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT; 2419 *DP |= DP_LINK_TRAIN_OFF_CPT;
2271 break; 2420 break;
2272 case DP_TRAINING_PATTERN_1: 2421 case DP_TRAINING_PATTERN_1:
2273 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT; 2422 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2274 break; 2423 break;
2275 case DP_TRAINING_PATTERN_2: 2424 case DP_TRAINING_PATTERN_2:
2276 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 2425 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2277 break; 2426 break;
2278 case DP_TRAINING_PATTERN_3: 2427 case DP_TRAINING_PATTERN_3:
2279 DRM_ERROR("DP training pattern 3 not supported\n"); 2428 DRM_ERROR("DP training pattern 3 not supported\n");
2280 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 2429 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2281 break; 2430 break;
2282 } 2431 }
2283 2432
2284 } else { 2433 } else {
2285 dp_reg_value &= ~DP_LINK_TRAIN_MASK; 2434 *DP &= ~DP_LINK_TRAIN_MASK;
2286 2435
2287 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 2436 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2288 case DP_TRAINING_PATTERN_DISABLE: 2437 case DP_TRAINING_PATTERN_DISABLE:
2289 dp_reg_value |= DP_LINK_TRAIN_OFF; 2438 *DP |= DP_LINK_TRAIN_OFF;
2290 break; 2439 break;
2291 case DP_TRAINING_PATTERN_1: 2440 case DP_TRAINING_PATTERN_1:
2292 dp_reg_value |= DP_LINK_TRAIN_PAT_1; 2441 *DP |= DP_LINK_TRAIN_PAT_1;
2293 break; 2442 break;
2294 case DP_TRAINING_PATTERN_2: 2443 case DP_TRAINING_PATTERN_2:
2295 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 2444 *DP |= DP_LINK_TRAIN_PAT_2;
2296 break; 2445 break;
2297 case DP_TRAINING_PATTERN_3: 2446 case DP_TRAINING_PATTERN_3:
2298 DRM_ERROR("DP training pattern 3 not supported\n"); 2447 DRM_ERROR("DP training pattern 3 not supported\n");
2299 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 2448 *DP |= DP_LINK_TRAIN_PAT_2;
2300 break; 2449 break;
2301 } 2450 }
2302 } 2451 }
2303 2452
2304 I915_WRITE(intel_dp->output_reg, dp_reg_value); 2453 I915_WRITE(intel_dp->output_reg, *DP);
2305 POSTING_READ(intel_dp->output_reg); 2454 POSTING_READ(intel_dp->output_reg);
2306 2455
2307 intel_dp_aux_native_write_1(intel_dp, 2456 buf[0] = dp_train_pat;
2308 DP_TRAINING_PATTERN_SET, 2457 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
2309 dp_train_pat);
2310
2311 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
2312 DP_TRAINING_PATTERN_DISABLE) { 2458 DP_TRAINING_PATTERN_DISABLE) {
2313 ret = intel_dp_aux_native_write(intel_dp, 2459 /* don't write DP_TRAINING_LANEx_SET on disable */
2314 DP_TRAINING_LANE0_SET, 2460 len = 1;
2315 intel_dp->train_set, 2461 } else {
2316 intel_dp->lane_count); 2462 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
2317 if (ret != intel_dp->lane_count) 2463 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
2318 return false; 2464 len = intel_dp->lane_count + 1;
2319 } 2465 }
2320 2466
2321 return true; 2467 ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
2468 buf, len);
2469
2470 return ret == len;
2471}
2472
2473static bool
2474intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2475 uint8_t dp_train_pat)
2476{
2477 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
2478 intel_dp_set_signal_levels(intel_dp, DP);
2479 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
2480}
2481
2482static bool
2483intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2484 const uint8_t link_status[DP_LINK_STATUS_SIZE])
2485{
2486 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2487 struct drm_device *dev = intel_dig_port->base.base.dev;
2488 struct drm_i915_private *dev_priv = dev->dev_private;
2489 int ret;
2490
2491 intel_get_adjust_train(intel_dp, link_status);
2492 intel_dp_set_signal_levels(intel_dp, DP);
2493
2494 I915_WRITE(intel_dp->output_reg, *DP);
2495 POSTING_READ(intel_dp->output_reg);
2496
2497 ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
2498 intel_dp->train_set,
2499 intel_dp->lane_count);
2500
2501 return ret == intel_dp->lane_count;
2322} 2502}
2323 2503
2324static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 2504static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
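
The buffered write above relies on the DPCD register layout: DP_TRAINING_PATTERN_SET sits at address 0x102 with DP_TRAINING_LANE0_SET..LANE3_SET immediately after at 0x103..0x106, so a single AUX transaction updates the pattern and all per-lane drive settings together:

    /* buf[] layout for the combined AUX write (DPCD numbering):
     *   buf[0]             -> 0x102 TRAINING_PATTERN_SET
     *   buf[1..lane_count] -> 0x103+ TRAINING_LANEx_SET
     * On pattern disable only buf[0] is sent (len == 1), since the
     * per-lane registers are deliberately not written then.
     */
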
@@ -2362,32 +2542,37 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2362 uint8_t voltage; 2542 uint8_t voltage;
2363 int voltage_tries, loop_tries; 2543 int voltage_tries, loop_tries;
2364 uint32_t DP = intel_dp->DP; 2544 uint32_t DP = intel_dp->DP;
2545 uint8_t link_config[2];
2365 2546
2366 if (HAS_DDI(dev)) 2547 if (HAS_DDI(dev))
2367 intel_ddi_prepare_link_retrain(encoder); 2548 intel_ddi_prepare_link_retrain(encoder);
2368 2549
2369 /* Write the link configuration data */ 2550 /* Write the link configuration data */
2370 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 2551 link_config[0] = intel_dp->link_bw;
2371 intel_dp->link_configuration, 2552 link_config[1] = intel_dp->lane_count;
2372 DP_LINK_CONFIGURATION_SIZE); 2553 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2554 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
2555 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
2556
2557 link_config[0] = 0;
2558 link_config[1] = DP_SET_ANSI_8B10B;
2559 intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
2373 2560
2374 DP |= DP_PORT_EN; 2561 DP |= DP_PORT_EN;
2375 2562
2376 memset(intel_dp->train_set, 0, 4); 2563 /* clock recovery */
2564 if (!intel_dp_reset_link_train(intel_dp, &DP,
2565 DP_TRAINING_PATTERN_1 |
2566 DP_LINK_SCRAMBLING_DISABLE)) {
2567 DRM_ERROR("failed to enable link training\n");
2568 return;
2569 }
2570
2377 voltage = 0xff; 2571 voltage = 0xff;
2378 voltage_tries = 0; 2572 voltage_tries = 0;
2379 loop_tries = 0; 2573 loop_tries = 0;
2380 for (;;) { 2574 for (;;) {
2381 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 2575 uint8_t link_status[DP_LINK_STATUS_SIZE];
2382 uint8_t link_status[DP_LINK_STATUS_SIZE];
2383
2384 intel_dp_set_signal_levels(intel_dp, &DP);
2385
2386 /* Set training pattern 1 */
2387 if (!intel_dp_set_link_train(intel_dp, DP,
2388 DP_TRAINING_PATTERN_1 |
2389 DP_LINK_SCRAMBLING_DISABLE))
2390 break;
2391 2576
2392 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); 2577 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
2393 if (!intel_dp_get_link_status(intel_dp, link_status)) { 2578 if (!intel_dp_get_link_status(intel_dp, link_status)) {
@@ -2407,10 +2592,12 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2407 if (i == intel_dp->lane_count) { 2592 if (i == intel_dp->lane_count) {
2408 ++loop_tries; 2593 ++loop_tries;
2409 if (loop_tries == 5) { 2594 if (loop_tries == 5) {
2410 DRM_DEBUG_KMS("too many full retries, give up\n"); 2595 DRM_ERROR("too many full retries, give up\n");
2411 break; 2596 break;
2412 } 2597 }
2413 memset(intel_dp->train_set, 0, 4); 2598 intel_dp_reset_link_train(intel_dp, &DP,
2599 DP_TRAINING_PATTERN_1 |
2600 DP_LINK_SCRAMBLING_DISABLE);
2414 voltage_tries = 0; 2601 voltage_tries = 0;
2415 continue; 2602 continue;
2416 } 2603 }
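
The two-byte configuration writes at the top of this hunk likewise land on adjacent DPCD registers, replacing the old 9-byte link_configuration block with just the fields link training needs:

    /* DPCD registers programmed before clock recovery (sketch):
     *   0x100 LINK_BW_SET        <- intel_dp->link_bw
     *   0x101 LANE_COUNT_SET     <- lane_count | ENHANCED_FRAME_EN
     *   0x107 DOWNSPREAD_CTRL    <- 0 (no downspread)
     *   0x108 MAIN_LINK_CHANNEL_CODING_SET <- DP_SET_ANSI_8B10B
     */
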
@@ -2419,15 +2606,18 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2419 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 2606 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
2420 ++voltage_tries; 2607 ++voltage_tries;
2421 if (voltage_tries == 5) { 2608 if (voltage_tries == 5) {
2422 DRM_DEBUG_KMS("too many voltage retries, give up\n"); 2609 DRM_ERROR("too many voltage retries, give up\n");
2423 break; 2610 break;
2424 } 2611 }
2425 } else 2612 } else
2426 voltage_tries = 0; 2613 voltage_tries = 0;
2427 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 2614 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
2428 2615
2429 /* Compute new intel_dp->train_set as requested by target */ 2616 /* Update training set as requested by target */
2430 intel_get_adjust_train(intel_dp, link_status); 2617 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
2618 DRM_ERROR("failed to update link training\n");
2619 break;
2620 }
2431 } 2621 }
2432 2622
2433 intel_dp->DP = DP; 2623 intel_dp->DP = DP;
@@ -2441,11 +2631,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2441 uint32_t DP = intel_dp->DP; 2631 uint32_t DP = intel_dp->DP;
2442 2632
2443 /* channel equalization */ 2633 /* channel equalization */
2634 if (!intel_dp_set_link_train(intel_dp, &DP,
2635 DP_TRAINING_PATTERN_2 |
2636 DP_LINK_SCRAMBLING_DISABLE)) {
2637 DRM_ERROR("failed to start channel equalization\n");
2638 return;
2639 }
2640
2444 tries = 0; 2641 tries = 0;
2445 cr_tries = 0; 2642 cr_tries = 0;
2446 channel_eq = false; 2643 channel_eq = false;
2447 for (;;) { 2644 for (;;) {
2448 uint8_t link_status[DP_LINK_STATUS_SIZE]; 2645 uint8_t link_status[DP_LINK_STATUS_SIZE];
2449 2646
2450 if (cr_tries > 5) { 2647 if (cr_tries > 5) {
2451 DRM_ERROR("failed to train DP, aborting\n"); 2648 DRM_ERROR("failed to train DP, aborting\n");
@@ -2453,21 +2650,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2453 break; 2650 break;
2454 } 2651 }
2455 2652
2456 intel_dp_set_signal_levels(intel_dp, &DP);
2457
2458 /* channel eq pattern */
2459 if (!intel_dp_set_link_train(intel_dp, DP,
2460 DP_TRAINING_PATTERN_2 |
2461 DP_LINK_SCRAMBLING_DISABLE))
2462 break;
2463
2464 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); 2653 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
2465 if (!intel_dp_get_link_status(intel_dp, link_status)) 2654 if (!intel_dp_get_link_status(intel_dp, link_status)) {
2655 DRM_ERROR("failed to get link status\n");
2466 break; 2656 break;
2657 }
2467 2658
2468 /* Make sure clock is still ok */ 2659 /* Make sure clock is still ok */
2469 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 2660 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
2470 intel_dp_start_link_train(intel_dp); 2661 intel_dp_start_link_train(intel_dp);
2662 intel_dp_set_link_train(intel_dp, &DP,
2663 DP_TRAINING_PATTERN_2 |
2664 DP_LINK_SCRAMBLING_DISABLE);
2471 cr_tries++; 2665 cr_tries++;
2472 continue; 2666 continue;
2473 } 2667 }
@@ -2481,13 +2675,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2481 if (tries > 5) { 2675 if (tries > 5) {
2482 intel_dp_link_down(intel_dp); 2676 intel_dp_link_down(intel_dp);
2483 intel_dp_start_link_train(intel_dp); 2677 intel_dp_start_link_train(intel_dp);
2678 intel_dp_set_link_train(intel_dp, &DP,
2679 DP_TRAINING_PATTERN_2 |
2680 DP_LINK_SCRAMBLING_DISABLE);
2484 tries = 0; 2681 tries = 0;
2485 cr_tries++; 2682 cr_tries++;
2486 continue; 2683 continue;
2487 } 2684 }
2488 2685
2489 /* Compute new intel_dp->train_set as requested by target */ 2686 /* Update training set as requested by target */
2490 intel_get_adjust_train(intel_dp, link_status); 2687 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
2688 DRM_ERROR("failed to update link training\n");
2689 break;
2690 }
2491 ++tries; 2691 ++tries;
2492 } 2692 }
2493 2693
@@ -2502,7 +2702,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2502 2702
2503void intel_dp_stop_link_train(struct intel_dp *intel_dp) 2703void intel_dp_stop_link_train(struct intel_dp *intel_dp)
2504{ 2704{
2505 intel_dp_set_link_train(intel_dp, intel_dp->DP, 2705 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2506 DP_TRAINING_PATTERN_DISABLE); 2706 DP_TRAINING_PATTERN_DISABLE);
2507} 2707}
2508 2708
@@ -2589,6 +2789,10 @@ intel_dp_link_down(struct intel_dp *intel_dp)
2589static bool 2789static bool
2590intel_dp_get_dpcd(struct intel_dp *intel_dp) 2790intel_dp_get_dpcd(struct intel_dp *intel_dp)
2591{ 2791{
2792 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2793 struct drm_device *dev = dig_port->base.base.dev;
2794 struct drm_i915_private *dev_priv = dev->dev_private;
2795
2592 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; 2796 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2593 2797
2594 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 2798 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
@@ -2604,11 +2808,16 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
2604 2808
2605 /* Check if the panel supports PSR */ 2809 /* Check if the panel supports PSR */
2606 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); 2810 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
2607 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT, 2811 if (is_edp(intel_dp)) {
2608 intel_dp->psr_dpcd, 2812 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
2609 sizeof(intel_dp->psr_dpcd)); 2813 intel_dp->psr_dpcd,
2610 if (is_edp_psr(intel_dp)) 2814 sizeof(intel_dp->psr_dpcd));
2611 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); 2815 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
2816 dev_priv->psr.sink_support = true;
2817 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
2818 }
2819 }
2820
2612 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 2821 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2613 DP_DWN_STRM_PORT_PRESENT)) 2822 DP_DWN_STRM_PORT_PRESENT))
2614 return true; /* native DP sink */ 2823 return true; /* native DP sink */
@@ -2728,7 +2937,6 @@ static enum drm_connector_status
2728intel_dp_detect_dpcd(struct intel_dp *intel_dp) 2937intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2729{ 2938{
2730 uint8_t *dpcd = intel_dp->dpcd; 2939 uint8_t *dpcd = intel_dp->dpcd;
2731 bool hpd;
2732 uint8_t type; 2940 uint8_t type;
2733 2941
2734 if (!intel_dp_get_dpcd(intel_dp)) 2942 if (!intel_dp_get_dpcd(intel_dp))
@@ -2739,8 +2947,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2739 return connector_status_connected; 2947 return connector_status_connected;
2740 2948
2741 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 2949 /* If we're HPD-aware, SINK_COUNT changes dynamically */
2742 hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD); 2950 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2743 if (hpd) { 2951 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
2744 uint8_t reg; 2952 uint8_t reg;
2745 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, 2953 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
2746 &reg, 1)) 2954 &reg, 1))
@@ -2754,9 +2962,18 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2754 return connector_status_connected; 2962 return connector_status_connected;
2755 2963
2756 /* Well we tried, say unknown for unreliable port types */ 2964 /* Well we tried, say unknown for unreliable port types */
2757 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 2965 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
2758 if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID) 2966 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2759 return connector_status_unknown; 2967 if (type == DP_DS_PORT_TYPE_VGA ||
2968 type == DP_DS_PORT_TYPE_NON_EDID)
2969 return connector_status_unknown;
2970 } else {
2971 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2972 DP_DWN_STRM_PORT_TYPE_MASK;
2973 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
2974 type == DP_DWN_STRM_PORT_TYPE_OTHER)
2975 return connector_status_unknown;
2976 }
2760 2977
2761 /* Anything else is out of spec, warn and ignore */ 2978 /* Anything else is out of spec, warn and ignore */
2762 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); 2979 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
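
The revision check added above matters because DPCD 1.0 sinks predate the per-port downstream capability block: for them only the coarse type field in DP_DOWNSTREAMPORT_PRESENT is defined, while 1.1+ sinks expose the detailed DP_DOWNSTREAM_PORT_0 capabilities the first branch reads. Schematically:

    /* Downstream port type, by DPCD revision (sketch):
     *   rev >= 1.1: DP_DOWNSTREAM_PORT_0 (0x80) & DP_DS_PORT_TYPE_MASK
     *   rev == 1.0: DP_DOWNSTREAMPORT_PRESENT (0x05) &
     *               DP_DWN_STRM_PORT_TYPE_MASK
     */
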
@@ -2830,19 +3047,11 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2830 3047
2831 /* use cached edid if we have one */ 3048 /* use cached edid if we have one */
2832 if (intel_connector->edid) { 3049 if (intel_connector->edid) {
2833 struct edid *edid;
2834 int size;
2835
2836 /* invalid edid */ 3050 /* invalid edid */
2837 if (IS_ERR(intel_connector->edid)) 3051 if (IS_ERR(intel_connector->edid))
2838 return NULL; 3052 return NULL;
2839 3053
2840 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; 3054 return drm_edid_duplicate(intel_connector->edid);
2841 edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
2842 if (!edid)
2843 return NULL;
2844
2845 return edid;
2846 } 3055 }
2847 3056
2848 return drm_get_edid(connector, adapter); 3057 return drm_get_edid(connector, adapter);
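
drm_edid_duplicate() is the new DRM core helper this hunk switches to; it wraps the same size computation and kmemdup the driver used to open-code, roughly:

    struct edid *drm_edid_duplicate(const struct edid *edid)
    {
        return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH,
                       GFP_KERNEL);
    }
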
@@ -3050,7 +3259,6 @@ intel_dp_connector_destroy(struct drm_connector *connector)
3050 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 3259 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3051 intel_panel_fini(&intel_connector->panel); 3260 intel_panel_fini(&intel_connector->panel);
3052 3261
3053 drm_sysfs_connector_remove(connector);
3054 drm_connector_cleanup(connector); 3262 drm_connector_cleanup(connector);
3055 kfree(connector); 3263 kfree(connector);
3056} 3264}
@@ -3121,7 +3329,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
3121bool intel_dpd_is_edp(struct drm_device *dev) 3329bool intel_dpd_is_edp(struct drm_device *dev)
3122{ 3330{
3123 struct drm_i915_private *dev_priv = dev->dev_private; 3331 struct drm_i915_private *dev_priv = dev->dev_private;
3124 struct child_device_config *p_child; 3332 union child_device_config *p_child;
3125 int i; 3333 int i;
3126 3334
3127 if (!dev_priv->vbt.child_dev_num) 3335 if (!dev_priv->vbt.child_dev_num)
@@ -3130,8 +3338,9 @@ bool intel_dpd_is_edp(struct drm_device *dev)
3130 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 3338 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3131 p_child = dev_priv->vbt.child_dev + i; 3339 p_child = dev_priv->vbt.child_dev + i;
3132 3340
3133 if (p_child->dvo_port == PORT_IDPD && 3341 if (p_child->common.dvo_port == PORT_IDPD &&
3134 p_child->device_type == DEVICE_TYPE_eDP) 3342 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
3343 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
3135 return true; 3344 return true;
3136 } 3345 }
3137 return false; 3346 return false;
@@ -3164,24 +3373,26 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
3164 struct drm_i915_private *dev_priv = dev->dev_private; 3373 struct drm_i915_private *dev_priv = dev->dev_private;
3165 struct edp_power_seq cur, vbt, spec, final; 3374 struct edp_power_seq cur, vbt, spec, final;
3166 u32 pp_on, pp_off, pp_div, pp; 3375 u32 pp_on, pp_off, pp_div, pp;
3167 int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg; 3376 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
3168 3377
3169 if (HAS_PCH_SPLIT(dev)) { 3378 if (HAS_PCH_SPLIT(dev)) {
3170 pp_control_reg = PCH_PP_CONTROL; 3379 pp_ctrl_reg = PCH_PP_CONTROL;
3171 pp_on_reg = PCH_PP_ON_DELAYS; 3380 pp_on_reg = PCH_PP_ON_DELAYS;
3172 pp_off_reg = PCH_PP_OFF_DELAYS; 3381 pp_off_reg = PCH_PP_OFF_DELAYS;
3173 pp_div_reg = PCH_PP_DIVISOR; 3382 pp_div_reg = PCH_PP_DIVISOR;
3174 } else { 3383 } else {
3175 pp_control_reg = PIPEA_PP_CONTROL; 3384 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
3176 pp_on_reg = PIPEA_PP_ON_DELAYS; 3385
3177 pp_off_reg = PIPEA_PP_OFF_DELAYS; 3386 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
3178 pp_div_reg = PIPEA_PP_DIVISOR; 3387 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
3388 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
3389 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
3179 } 3390 }
3180 3391
3181 /* Workaround: Need to write PP_CONTROL with the unlock key as 3392 /* Workaround: Need to write PP_CONTROL with the unlock key as
3182 * the very first thing. */ 3393 * the very first thing. */
3183 pp = ironlake_get_pp_control(intel_dp); 3394 pp = ironlake_get_pp_control(intel_dp);
3184 I915_WRITE(pp_control_reg, pp); 3395 I915_WRITE(pp_ctrl_reg, pp);
3185 3396
3186 pp_on = I915_READ(pp_on_reg); 3397 pp_on = I915_READ(pp_on_reg);
3187 pp_off = I915_READ(pp_off_reg); 3398 pp_off = I915_READ(pp_off_reg);
@@ -3269,9 +3480,11 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3269 pp_off_reg = PCH_PP_OFF_DELAYS; 3480 pp_off_reg = PCH_PP_OFF_DELAYS;
3270 pp_div_reg = PCH_PP_DIVISOR; 3481 pp_div_reg = PCH_PP_DIVISOR;
3271 } else { 3482 } else {
3272 pp_on_reg = PIPEA_PP_ON_DELAYS; 3483 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
3273 pp_off_reg = PIPEA_PP_OFF_DELAYS; 3484
3274 pp_div_reg = PIPEA_PP_DIVISOR; 3485 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
3486 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
3487 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
3275 } 3488 }
3276 3489
3277 /* And finally store the new values in the power sequencer. */ 3490 /* And finally store the new values in the power sequencer. */
@@ -3288,12 +3501,15 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3288 /* Haswell doesn't have any port selection bits for the panel 3501 /* Haswell doesn't have any port selection bits for the panel
3289 * power sequencer any more. */ 3502 * power sequencer any more. */
3290 if (IS_VALLEYVIEW(dev)) { 3503 if (IS_VALLEYVIEW(dev)) {
3291 port_sel = I915_READ(pp_on_reg) & 0xc0000000; 3504 if (dp_to_dig_port(intel_dp)->port == PORT_B)
3505 port_sel = PANEL_PORT_SELECT_DPB_VLV;
3506 else
3507 port_sel = PANEL_PORT_SELECT_DPC_VLV;
3292 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 3508 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
3293 if (dp_to_dig_port(intel_dp)->port == PORT_A) 3509 if (dp_to_dig_port(intel_dp)->port == PORT_A)
3294 port_sel = PANEL_POWER_PORT_DP_A; 3510 port_sel = PANEL_PORT_SELECT_DPA;
3295 else 3511 else
3296 port_sel = PANEL_POWER_PORT_DP_D; 3512 port_sel = PANEL_PORT_SELECT_DPD;
3297 } 3513 }
3298 3514
3299 pp_on |= port_sel; 3515 pp_on |= port_sel;
@@ -3346,7 +3562,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3346 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 3562 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
3347 &power_seq); 3563 &power_seq);
3348 3564
3349 ironlake_edp_panel_vdd_on(intel_dp);
3350 edid = drm_get_edid(connector, &intel_dp->adapter); 3565 edid = drm_get_edid(connector, &intel_dp->adapter);
3351 if (edid) { 3566 if (edid) {
3352 if (drm_add_edid_modes(connector, edid)) { 3567 if (drm_add_edid_modes(connector, edid)) {
@@ -3378,8 +3593,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3378 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 3593 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
3379 } 3594 }
3380 3595
3381 ironlake_edp_panel_vdd_off(intel_dp, false);
3382
3383 intel_panel_init(&intel_connector->panel, fixed_mode); 3596 intel_panel_init(&intel_connector->panel, fixed_mode);
3384 intel_panel_setup_backlight(connector); 3597 intel_panel_setup_backlight(connector);
3385 3598
@@ -3536,11 +3749,11 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3536 struct drm_encoder *encoder; 3749 struct drm_encoder *encoder;
3537 struct intel_connector *intel_connector; 3750 struct intel_connector *intel_connector;
3538 3751
3539 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 3752 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
3540 if (!intel_dig_port) 3753 if (!intel_dig_port)
3541 return; 3754 return;
3542 3755
3543 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 3756 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
3544 if (!intel_connector) { 3757 if (!intel_connector) {
3545 kfree(intel_dig_port); 3758 kfree(intel_dig_port);
3546 return; 3759 return;
@@ -3559,12 +3772,12 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3559 intel_encoder->get_hw_state = intel_dp_get_hw_state; 3772 intel_encoder->get_hw_state = intel_dp_get_hw_state;
3560 intel_encoder->get_config = intel_dp_get_config; 3773 intel_encoder->get_config = intel_dp_get_config;
3561 if (IS_VALLEYVIEW(dev)) { 3774 if (IS_VALLEYVIEW(dev)) {
3562 intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable; 3775 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
3563 intel_encoder->pre_enable = vlv_pre_enable_dp; 3776 intel_encoder->pre_enable = vlv_pre_enable_dp;
3564 intel_encoder->enable = vlv_enable_dp; 3777 intel_encoder->enable = vlv_enable_dp;
3565 } else { 3778 } else {
3566 intel_encoder->pre_enable = intel_pre_enable_dp; 3779 intel_encoder->pre_enable = g4x_pre_enable_dp;
3567 intel_encoder->enable = intel_enable_dp; 3780 intel_encoder->enable = g4x_enable_dp;
3568 } 3781 }
3569 3782
3570 intel_dig_port->port = port; 3783 intel_dig_port->port = port;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7f2b384ac939..1e49aa8f5377 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -77,7 +77,6 @@
77/* the i915, i945 have a single sDVO i2c bus - which is different */ 77/* the i915, i945 have a single sDVO i2c bus - which is different */
78#define MAX_OUTPUTS 6 78#define MAX_OUTPUTS 6
 79/* maximum connectors per crtc in the mode set */ 79/* maximum connectors per crtc in the mode set */
80#define INTELFB_CONN_LIMIT 4
81 80
82#define INTEL_I2C_BUS_DVO 1 81#define INTEL_I2C_BUS_DVO 1
83#define INTEL_I2C_BUS_SDVO 2 82#define INTEL_I2C_BUS_SDVO 2
@@ -93,13 +92,17 @@
93#define INTEL_OUTPUT_HDMI 6 92#define INTEL_OUTPUT_HDMI 6
94#define INTEL_OUTPUT_DISPLAYPORT 7 93#define INTEL_OUTPUT_DISPLAYPORT 7
95#define INTEL_OUTPUT_EDP 8 94#define INTEL_OUTPUT_EDP 8
96#define INTEL_OUTPUT_UNKNOWN 9 95#define INTEL_OUTPUT_DSI 9
96#define INTEL_OUTPUT_UNKNOWN 10
97 97
98#define INTEL_DVO_CHIP_NONE 0 98#define INTEL_DVO_CHIP_NONE 0
99#define INTEL_DVO_CHIP_LVDS 1 99#define INTEL_DVO_CHIP_LVDS 1
100#define INTEL_DVO_CHIP_TMDS 2 100#define INTEL_DVO_CHIP_TMDS 2
101#define INTEL_DVO_CHIP_TVOUT 4 101#define INTEL_DVO_CHIP_TVOUT 4
102 102
103#define INTEL_DSI_COMMAND_MODE 0
104#define INTEL_DSI_VIDEO_MODE 1
105
103struct intel_framebuffer { 106struct intel_framebuffer {
104 struct drm_framebuffer base; 107 struct drm_framebuffer base;
105 struct drm_i915_gem_object *obj; 108 struct drm_i915_gem_object *obj;
@@ -207,8 +210,21 @@ struct intel_crtc_config {
207#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ 210#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
208 unsigned long quirks; 211 unsigned long quirks;
209 212
 213	/* User requested mode, only valid as a starting point to
 214	 * compute adjusted_mode, except in the case of (S)DVO, where
 215	 * it also gives the output timings of the (S)DVO chip.
 216	 * adjusted_mode will then correspond to the (S)DVO chip's
 217	 * preferred input timings. */
210 struct drm_display_mode requested_mode; 218 struct drm_display_mode requested_mode;
 219	/* Actual pipe timings, i.e. what we program into the pipe timing
 220	 * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
211 struct drm_display_mode adjusted_mode; 221 struct drm_display_mode adjusted_mode;
222
 223	/* Pipe source size (i.e. panel fitter input size).
 224	 * All planes will be positioned inside this space,
 225	 * and get clipped at the edges. */
226 int pipe_src_w, pipe_src_h;
227
212 /* Whether to set up the PCH/FDI. Note that we never allow sharing 228 /* Whether to set up the PCH/FDI. Note that we never allow sharing
213 * between pch encoders and cpu encoders. */ 229 * between pch encoders and cpu encoders. */
214 bool has_pch_encoder; 230 bool has_pch_encoder;
@@ -262,7 +278,8 @@ struct intel_crtc_config {
262 278
263 /* 279 /*
 264	 * Frequency the dpll for the port should run at. Differs from the 280	 * Frequency the dpll for the port should run at. Differs from the
265 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. 281 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
282 * already multiplied by pixel_multiplier.
266 */ 283 */
267 int port_clock; 284 int port_clock;
268 285
@@ -288,6 +305,14 @@ struct intel_crtc_config {
288 struct intel_link_m_n fdi_m_n; 305 struct intel_link_m_n fdi_m_n;
289 306
290 bool ips_enabled; 307 bool ips_enabled;
308
309 bool double_wide;
310};
311
312struct intel_pipe_wm {
313 struct intel_wm_level wm[5];
314 uint32_t linetime;
315 bool fbc_wm_enabled;
291}; 316};
292 317
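For orientation, a sketch (not part of this patch) of how the new pipe_src_w/h fields would typically be seeded during compute_config, assuming they start from the user-requested active size before any panel fitting:

	/* sketch: the pipe source size starts as the requested active area;
	 * the panel fitter then scales it out to the adjusted_mode timings */
	pipe_config->pipe_src_w = pipe_config->requested_mode.hdisplay;
	pipe_config->pipe_src_h = pipe_config->requested_mode.vdisplay;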
293struct intel_crtc { 318struct intel_crtc {
@@ -301,8 +326,9 @@ struct intel_crtc {
301 * some outputs connected to this crtc. 326 * some outputs connected to this crtc.
302 */ 327 */
303 bool active; 328 bool active;
329 unsigned long enabled_power_domains;
304 bool eld_vld; 330 bool eld_vld;
305 bool primary_disabled; /* is the crtc obscured by a plane? */ 331 bool primary_enabled; /* is the primary plane (partially) visible? */
306 bool lowfreq_avail; 332 bool lowfreq_avail;
307 struct intel_overlay *overlay; 333 struct intel_overlay *overlay;
308 struct intel_unpin_work *unpin_work; 334 struct intel_unpin_work *unpin_work;
@@ -330,6 +356,12 @@ struct intel_crtc {
330 /* Access to these should be protected by dev_priv->irq_lock. */ 356 /* Access to these should be protected by dev_priv->irq_lock. */
331 bool cpu_fifo_underrun_disabled; 357 bool cpu_fifo_underrun_disabled;
332 bool pch_fifo_underrun_disabled; 358 bool pch_fifo_underrun_disabled;
359
360 /* per-pipe watermark state */
361 struct {
362 /* watermarks currently being used */
363 struct intel_pipe_wm active;
364 } wm;
333}; 365};
334 366
335struct intel_plane_wm_parameters { 367struct intel_plane_wm_parameters {
@@ -417,13 +449,11 @@ struct intel_hdmi {
417}; 449};
418 450
419#define DP_MAX_DOWNSTREAM_PORTS 0x10 451#define DP_MAX_DOWNSTREAM_PORTS 0x10
420#define DP_LINK_CONFIGURATION_SIZE 9
421 452
422struct intel_dp { 453struct intel_dp {
423 uint32_t output_reg; 454 uint32_t output_reg;
424 uint32_t aux_ch_ctl_reg; 455 uint32_t aux_ch_ctl_reg;
425 uint32_t DP; 456 uint32_t DP;
426 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
427 bool has_audio; 457 bool has_audio;
428 enum hdmi_force_audio force_audio; 458 enum hdmi_force_audio force_audio;
429 uint32_t color_range; 459 uint32_t color_range;
@@ -495,80 +525,6 @@ struct intel_unpin_work {
495 bool enable_stall_check; 525 bool enable_stall_check;
496}; 526};
497 527
498int intel_pch_rawclk(struct drm_device *dev);
499
500int intel_connector_update_modes(struct drm_connector *connector,
501 struct edid *edid);
502int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
503
504extern void intel_attach_force_audio_property(struct drm_connector *connector);
505extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
506
507extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
508extern void intel_crt_init(struct drm_device *dev);
509extern void intel_hdmi_init(struct drm_device *dev,
510 int hdmi_reg, enum port port);
511extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
512 struct intel_connector *intel_connector);
513extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
514extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
515 struct intel_crtc_config *pipe_config);
516extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
517 bool is_sdvob);
518extern void intel_dvo_init(struct drm_device *dev);
519extern void intel_tv_init(struct drm_device *dev);
520extern void intel_mark_busy(struct drm_device *dev);
521extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
522 struct intel_ring_buffer *ring);
523extern void intel_mark_idle(struct drm_device *dev);
524extern void intel_lvds_init(struct drm_device *dev);
525extern bool intel_is_dual_link_lvds(struct drm_device *dev);
526extern void intel_dp_init(struct drm_device *dev, int output_reg,
527 enum port port);
528extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
529 struct intel_connector *intel_connector);
530extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
531extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
532extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
533extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
534extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
535extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
536extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
537extern bool intel_dp_compute_config(struct intel_encoder *encoder,
538 struct intel_crtc_config *pipe_config);
539extern bool intel_dpd_is_edp(struct drm_device *dev);
540extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
541extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
542extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
543extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
544extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
545extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
546extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
547extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
548 enum plane plane);
549
550/* intel_panel.c */
551extern int intel_panel_init(struct intel_panel *panel,
552 struct drm_display_mode *fixed_mode);
553extern void intel_panel_fini(struct intel_panel *panel);
554
555extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
556 struct drm_display_mode *adjusted_mode);
557extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
558 struct intel_crtc_config *pipe_config,
559 int fitting_mode);
560extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
561 struct intel_crtc_config *pipe_config,
562 int fitting_mode);
563extern void intel_panel_set_backlight(struct drm_device *dev,
564 u32 level, u32 max);
565extern int intel_panel_setup_backlight(struct drm_connector *connector);
566extern void intel_panel_enable_backlight(struct drm_device *dev,
567 enum pipe pipe);
568extern void intel_panel_disable_backlight(struct drm_device *dev);
569extern void intel_panel_destroy_backlight(struct drm_device *dev);
570extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
571
572struct intel_set_config { 528struct intel_set_config {
573 struct drm_encoder **save_connector_encoders; 529 struct drm_encoder **save_connector_encoders;
574 struct drm_crtc **save_encoder_crtcs; 530 struct drm_crtc **save_encoder_crtcs;
@@ -577,18 +533,14 @@ struct intel_set_config {
577 bool mode_changed; 533 bool mode_changed;
578}; 534};
579 535
580extern void intel_crtc_restore_mode(struct drm_crtc *crtc); 536struct intel_load_detect_pipe {
581extern void intel_crtc_load_lut(struct drm_crtc *crtc); 537 struct drm_framebuffer *release_fb;
582extern void intel_crtc_update_dpms(struct drm_crtc *crtc); 538 bool load_detect_temp;
583extern void intel_encoder_destroy(struct drm_encoder *encoder); 539 int dpms_mode;
584extern void intel_connector_dpms(struct drm_connector *, int mode); 540};
585extern bool intel_connector_get_hw_state(struct intel_connector *connector);
586extern void intel_modeset_check_state(struct drm_device *dev);
587extern void intel_plane_restore(struct drm_plane *plane);
588extern void intel_plane_disable(struct drm_plane *plane);
589
590 541
591static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) 542static inline struct intel_encoder *
543intel_attached_encoder(struct drm_connector *connector)
592{ 544{
593 return to_intel_connector(connector)->encoder; 545 return to_intel_connector(connector)->encoder;
594} 546}
@@ -616,73 +568,95 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
616 return container_of(intel_hdmi, struct intel_digital_port, hdmi); 568 return container_of(intel_hdmi, struct intel_digital_port, hdmi);
617} 569}
618 570
571
572/* i915_irq.c */
573bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
574 enum pipe pipe, bool enable);
575bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
576 enum transcoder pch_transcoder,
577 bool enable);
578void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
579void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
580void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
581void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
582void hsw_pc8_disable_interrupts(struct drm_device *dev);
583void hsw_pc8_restore_interrupts(struct drm_device *dev);
584
585
586/* intel_crt.c */
587void intel_crt_init(struct drm_device *dev);
588
589
590/* intel_ddi.c */
591void intel_prepare_ddi(struct drm_device *dev);
592void hsw_fdi_link_train(struct drm_crtc *crtc);
593void intel_ddi_init(struct drm_device *dev, enum port port);
594enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
595bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
596int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
597void intel_ddi_pll_init(struct drm_device *dev);
598void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
599void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
600 enum transcoder cpu_transcoder);
601void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
602void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
603void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
604bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
605void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
606void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
607void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
608bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
609void intel_ddi_fdi_disable(struct drm_crtc *crtc);
610void intel_ddi_get_config(struct intel_encoder *encoder,
611 struct intel_crtc_config *pipe_config);
612
613
614/* intel_display.c */
615int intel_pch_rawclk(struct drm_device *dev);
616void intel_mark_busy(struct drm_device *dev);
617void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
618 struct intel_ring_buffer *ring);
619void intel_mark_idle(struct drm_device *dev);
620void intel_crtc_restore_mode(struct drm_crtc *crtc);
621void intel_crtc_update_dpms(struct drm_crtc *crtc);
622void intel_encoder_destroy(struct drm_encoder *encoder);
623void intel_connector_dpms(struct drm_connector *, int mode);
624bool intel_connector_get_hw_state(struct intel_connector *connector);
625void intel_modeset_check_state(struct drm_device *dev);
619bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, 626bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
620 struct intel_digital_port *port); 627 struct intel_digital_port *port);
621 628void intel_connector_attach_encoder(struct intel_connector *connector,
622extern void intel_connector_attach_encoder(struct intel_connector *connector, 629 struct intel_encoder *encoder);
623 struct intel_encoder *encoder); 630struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
624extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); 631struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
625 632 struct drm_crtc *crtc);
626extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 633enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
627 struct drm_crtc *crtc);
628int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 634int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
629 struct drm_file *file_priv); 635 struct drm_file *file_priv);
630extern enum transcoder 636enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
631intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 637 enum pipe pipe);
632 enum pipe pipe); 638void intel_wait_for_vblank(struct drm_device *dev, int pipe);
633extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 639void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
634extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); 640int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
635extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); 641void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
636extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port); 642bool intel_get_load_detect_pipe(struct drm_connector *connector,
637 643 struct drm_display_mode *mode,
638struct intel_load_detect_pipe { 644 struct intel_load_detect_pipe *old);
639 struct drm_framebuffer *release_fb; 645void intel_release_load_detect_pipe(struct drm_connector *connector,
640 bool load_detect_temp; 646 struct intel_load_detect_pipe *old);
641 int dpms_mode; 647int intel_pin_and_fence_fb_obj(struct drm_device *dev,
642}; 648 struct drm_i915_gem_object *obj,
643extern bool intel_get_load_detect_pipe(struct drm_connector *connector, 649 struct intel_ring_buffer *pipelined);
644 struct drm_display_mode *mode, 650void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
645 struct intel_load_detect_pipe *old); 651int intel_framebuffer_init(struct drm_device *dev,
646extern void intel_release_load_detect_pipe(struct drm_connector *connector, 652 struct intel_framebuffer *ifb,
647 struct intel_load_detect_pipe *old); 653 struct drm_mode_fb_cmd2 *mode_cmd,
648 654 struct drm_i915_gem_object *obj);
649extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 655void intel_framebuffer_fini(struct intel_framebuffer *fb);
650 u16 blue, int regno); 656void intel_prepare_page_flip(struct drm_device *dev, int plane);
651extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 657void intel_finish_page_flip(struct drm_device *dev, int pipe);
652 u16 *blue, int regno); 658void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
653 659struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
654extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
655 struct drm_i915_gem_object *obj,
656 struct intel_ring_buffer *pipelined);
657extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
658
659extern int intel_framebuffer_init(struct drm_device *dev,
660 struct intel_framebuffer *ifb,
661 struct drm_mode_fb_cmd2 *mode_cmd,
662 struct drm_i915_gem_object *obj);
663extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
664extern int intel_fbdev_init(struct drm_device *dev);
665extern void intel_fbdev_initial_config(struct drm_device *dev);
666extern void intel_fbdev_fini(struct drm_device *dev);
667extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
668extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
669extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
670extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
671
672extern void intel_setup_overlay(struct drm_device *dev);
673extern void intel_cleanup_overlay(struct drm_device *dev);
674extern int intel_overlay_switch_off(struct intel_overlay *overlay);
675extern int intel_overlay_put_image(struct drm_device *dev, void *data,
676 struct drm_file *file_priv);
677extern int intel_overlay_attrs(struct drm_device *dev, void *data,
678 struct drm_file *file_priv);
679
680extern void intel_fb_output_poll_changed(struct drm_device *dev);
681extern void intel_fb_restore_mode(struct drm_device *dev);
682
683struct intel_shared_dpll *
684intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
685
686void assert_shared_dpll(struct drm_i915_private *dev_priv, 660void assert_shared_dpll(struct drm_i915_private *dev_priv,
687 struct intel_shared_dpll *pll, 661 struct intel_shared_dpll *pll,
688 bool state); 662 bool state);
@@ -696,104 +670,199 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
696 enum pipe pipe, bool state); 670 enum pipe pipe, bool state);
697#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true) 671#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
698#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false) 672#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
699extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 673void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
700 bool state);
701#define assert_pipe_enabled(d, p) assert_pipe(d, p, true) 674#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
702#define assert_pipe_disabled(d, p) assert_pipe(d, p, false) 675#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
676void intel_write_eld(struct drm_encoder *encoder,
677 struct drm_display_mode *mode);
678unsigned long intel_gen4_compute_page_offset(int *x, int *y,
679 unsigned int tiling_mode,
680 unsigned int bpp,
681 unsigned int pitch);
682void intel_display_handle_reset(struct drm_device *dev);
683void hsw_enable_pc8_work(struct work_struct *__work);
684void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
685void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
686void intel_dp_get_m_n(struct intel_crtc *crtc,
687 struct intel_crtc_config *pipe_config);
688int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
689void
690ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
691 int dotclock);
692bool intel_crtc_active(struct drm_crtc *crtc);
693void i915_disable_vga_mem(struct drm_device *dev);
694void hsw_enable_ips(struct intel_crtc *crtc);
695void hsw_disable_ips(struct intel_crtc *crtc);
696void intel_display_set_init_power(struct drm_device *dev, bool enable);
697
698
699/* intel_dp.c */
700void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
701bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
702 struct intel_connector *intel_connector);
703void intel_dp_start_link_train(struct intel_dp *intel_dp);
704void intel_dp_complete_link_train(struct intel_dp *intel_dp);
705void intel_dp_stop_link_train(struct intel_dp *intel_dp);
706void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
707void intel_dp_encoder_destroy(struct drm_encoder *encoder);
708void intel_dp_check_link_status(struct intel_dp *intel_dp);
709bool intel_dp_compute_config(struct intel_encoder *encoder,
710 struct intel_crtc_config *pipe_config);
711bool intel_dpd_is_edp(struct drm_device *dev);
712void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
713void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
714void ironlake_edp_panel_on(struct intel_dp *intel_dp);
715void ironlake_edp_panel_off(struct intel_dp *intel_dp);
716void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
717void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
718void intel_edp_psr_enable(struct intel_dp *intel_dp);
719void intel_edp_psr_disable(struct intel_dp *intel_dp);
720void intel_edp_psr_update(struct drm_device *dev);
721
722
723/* intel_dsi.c */
724bool intel_dsi_init(struct drm_device *dev);
725
726
727/* intel_dvo.c */
728void intel_dvo_init(struct drm_device *dev);
729
730
731/* legacy fbdev emulation in intel_fbdev.c */
732#ifdef CONFIG_DRM_I915_FBDEV
733extern int intel_fbdev_init(struct drm_device *dev);
734extern void intel_fbdev_initial_config(struct drm_device *dev);
735extern void intel_fbdev_fini(struct drm_device *dev);
736extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
737extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
738extern void intel_fbdev_restore_mode(struct drm_device *dev);
739#else
740static inline int intel_fbdev_init(struct drm_device *dev)
741{
742 return 0;
743}
703 744
704extern void intel_init_clock_gating(struct drm_device *dev); 745static inline void intel_fbdev_initial_config(struct drm_device *dev)
705extern void intel_suspend_hw(struct drm_device *dev); 746{
706extern void intel_write_eld(struct drm_encoder *encoder, 747}
707 struct drm_display_mode *mode); 748
708extern void intel_prepare_ddi(struct drm_device *dev); 749static inline void intel_fbdev_fini(struct drm_device *dev)
709extern void hsw_fdi_link_train(struct drm_crtc *crtc); 750{
710extern void intel_ddi_init(struct drm_device *dev, enum port port); 751}
711 752
712/* For use by IVB LP watermark workaround in intel_sprite.c */ 753static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state)
713extern void intel_update_watermarks(struct drm_device *dev); 754{
714extern void intel_update_sprite_watermarks(struct drm_plane *plane, 755}
715 struct drm_crtc *crtc, 756
716 uint32_t sprite_width, int pixel_size, 757static inline void intel_fbdev_restore_mode(struct drm_device *dev)
717 bool enabled, bool scaled); 758{
718 759}
719extern unsigned long intel_gen4_compute_page_offset(int *x, int *y, 760#endif
720 unsigned int tiling_mode, 761
721 unsigned int bpp, 762/* intel_hdmi.c */
722 unsigned int pitch); 763void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
723 764void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
724extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 765 struct intel_connector *intel_connector);
725 struct drm_file *file_priv); 766struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
726extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, 767bool intel_hdmi_compute_config(struct intel_encoder *encoder,
727 struct drm_file *file_priv); 768 struct intel_crtc_config *pipe_config);
728 769
729/* Power-related functions, located in intel_pm.c */ 770
730extern void intel_init_pm(struct drm_device *dev); 771/* intel_lvds.c */
731/* FBC */ 772void intel_lvds_init(struct drm_device *dev);
732extern bool intel_fbc_enabled(struct drm_device *dev); 773bool intel_is_dual_link_lvds(struct drm_device *dev);
733extern void intel_update_fbc(struct drm_device *dev); 774
734/* IPS */ 775
735extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 776/* intel_modes.c */
736extern void intel_gpu_ips_teardown(void); 777int intel_connector_update_modes(struct drm_connector *connector,
737 778 struct edid *edid);
738/* Power well */ 779int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
739extern int i915_init_power_well(struct drm_device *dev); 780void intel_attach_force_audio_property(struct drm_connector *connector);
740extern void i915_remove_power_well(struct drm_device *dev); 781void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
741 782
742extern bool intel_display_power_enabled(struct drm_device *dev, 783
743 enum intel_display_power_domain domain); 784/* intel_overlay.c */
744extern void intel_init_power_well(struct drm_device *dev); 785void intel_setup_overlay(struct drm_device *dev);
745extern void intel_set_power_well(struct drm_device *dev, bool enable); 786void intel_cleanup_overlay(struct drm_device *dev);
746extern void intel_enable_gt_powersave(struct drm_device *dev); 787int intel_overlay_switch_off(struct intel_overlay *overlay);
747extern void intel_disable_gt_powersave(struct drm_device *dev); 788int intel_overlay_put_image(struct drm_device *dev, void *data,
748extern void ironlake_teardown_rc6(struct drm_device *dev); 789 struct drm_file *file_priv);
790int intel_overlay_attrs(struct drm_device *dev, void *data,
791 struct drm_file *file_priv);
792
793
794/* intel_panel.c */
795int intel_panel_init(struct intel_panel *panel,
796 struct drm_display_mode *fixed_mode);
797void intel_panel_fini(struct intel_panel *panel);
798void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
799 struct drm_display_mode *adjusted_mode);
800void intel_pch_panel_fitting(struct intel_crtc *crtc,
801 struct intel_crtc_config *pipe_config,
802 int fitting_mode);
803void intel_gmch_panel_fitting(struct intel_crtc *crtc,
804 struct intel_crtc_config *pipe_config,
805 int fitting_mode);
806void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
807 u32 max);
808int intel_panel_setup_backlight(struct drm_connector *connector);
809void intel_panel_enable_backlight(struct intel_connector *connector);
810void intel_panel_disable_backlight(struct intel_connector *connector);
811void intel_panel_destroy_backlight(struct drm_device *dev);
812enum drm_connector_status intel_panel_detect(struct drm_device *dev);
813
814
815/* intel_pm.c */
816void intel_init_clock_gating(struct drm_device *dev);
817void intel_suspend_hw(struct drm_device *dev);
818void intel_update_watermarks(struct drm_crtc *crtc);
819void intel_update_sprite_watermarks(struct drm_plane *plane,
820 struct drm_crtc *crtc,
821 uint32_t sprite_width, int pixel_size,
822 bool enabled, bool scaled);
823void intel_init_pm(struct drm_device *dev);
824bool intel_fbc_enabled(struct drm_device *dev);
825void intel_update_fbc(struct drm_device *dev);
826void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
827void intel_gpu_ips_teardown(void);
828int intel_power_domains_init(struct drm_device *dev);
829void intel_power_domains_remove(struct drm_device *dev);
830bool intel_display_power_enabled(struct drm_device *dev,
831 enum intel_display_power_domain domain);
832void intel_display_power_get(struct drm_device *dev,
833 enum intel_display_power_domain domain);
834void intel_display_power_put(struct drm_device *dev,
835 enum intel_display_power_domain domain);
836void intel_power_domains_init_hw(struct drm_device *dev);
837void intel_set_power_well(struct drm_device *dev, bool enable);
838void intel_enable_gt_powersave(struct drm_device *dev);
839void intel_disable_gt_powersave(struct drm_device *dev);
840void ironlake_teardown_rc6(struct drm_device *dev);
749void gen6_update_ring_freq(struct drm_device *dev); 841void gen6_update_ring_freq(struct drm_device *dev);
842void gen6_rps_idle(struct drm_i915_private *dev_priv);
843void gen6_rps_boost(struct drm_i915_private *dev_priv);
844void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
845void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
846void ilk_wm_get_hw_state(struct drm_device *dev);
847
848
849/* intel_sdvo.c */
850bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
851
852
853/* intel_sprite.c */
854int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
855void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
856 enum plane plane);
857void intel_plane_restore(struct drm_plane *plane);
858void intel_plane_disable(struct drm_plane *plane);
859int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
860 struct drm_file *file_priv);
861int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
862 struct drm_file *file_priv);
863
750 864
751extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, 865/* intel_tv.c */
752 enum pipe *pipe); 866void intel_tv_init(struct drm_device *dev);
753extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
754extern void intel_ddi_pll_init(struct drm_device *dev);
755extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
756extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
757 enum transcoder cpu_transcoder);
758extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
759extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
760extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
761extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
762extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
763extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
764extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
765extern bool
766intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
767extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
768extern void intel_ddi_get_config(struct intel_encoder *encoder,
769 struct intel_crtc_config *pipe_config);
770
771extern void intel_display_handle_reset(struct drm_device *dev);
772extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
773 enum pipe pipe,
774 bool enable);
775extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
776 enum transcoder pch_transcoder,
777 bool enable);
778
779extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
780extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
781extern void intel_edp_psr_update(struct drm_device *dev);
782extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
783 bool switch_to_fclk, bool allow_power_down);
784extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
785extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
786extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
787 uint32_t mask);
788extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
789extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
790 uint32_t mask);
791extern void hsw_enable_pc8_work(struct work_struct *__work);
792extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
793extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
794extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
795extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
796extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
797extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
798 867
799#endif /* __INTEL_DRV_H__ */ 868#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 000000000000..d257b093ca68
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,620 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Jani Nikula <jani.nikula@intel.com>
24 */
25
26#include <drm/drmP.h>
27#include <drm/drm_crtc.h>
28#include <drm/drm_edid.h>
29#include <drm/i915_drm.h>
30#include <linux/slab.h>
31#include "i915_drv.h"
32#include "intel_drv.h"
33#include "intel_dsi.h"
34#include "intel_dsi_cmd.h"
35
36/* the sub-encoders aka panel drivers */
37static const struct intel_dsi_device intel_dsi_devices[] = {
38};
39
40
41static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
42 u32 mask)
43{
44 u32 tmp = vlv_cck_read(dev_priv, reg);
45 tmp &= ~mask;
46 tmp |= val;
47 vlv_cck_write(dev_priv, reg, tmp);
48}
49
50static void band_gap_wa(struct drm_i915_private *dev_priv)
51{
52 mutex_lock(&dev_priv->dpio_lock);
53
54 /* Enable bandgap fix in GOP driver */
55 vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000);
56 msleep(20);
57 vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000);
58 msleep(20);
59 vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000);
60 msleep(20);
61 vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
62 msleep(20);
63 vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
64 msleep(20);
65
66 /* Turn Display Trunk on */
67 vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
68 msleep(20);
69
70 vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
71 msleep(20);
72
73 vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
74 msleep(20);
75 vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
76 msleep(20);
77 vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);
78
79 mutex_unlock(&dev_priv->dpio_lock);
80
81 /* Need huge delay, otherwise clock is not stable */
82 msleep(100);
83}
84
85static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
86{
87 return container_of(intel_attached_encoder(connector),
88 struct intel_dsi, base);
89}
90
91static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
92{
93 return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
94}
95
96static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
97{
98 return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
99}
100
101static void intel_dsi_hot_plug(struct intel_encoder *encoder)
102{
103 DRM_DEBUG_KMS("\n");
104}
105
106static bool intel_dsi_compute_config(struct intel_encoder *encoder,
107 struct intel_crtc_config *config)
108{
109 struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
110 base);
111 struct intel_connector *intel_connector = intel_dsi->attached_connector;
112 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
113 struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
114 struct drm_display_mode *mode = &config->requested_mode;
115
116 DRM_DEBUG_KMS("\n");
117
118 if (fixed_mode)
119 intel_fixed_panel_mode(fixed_mode, adjusted_mode);
120
121 if (intel_dsi->dev.dev_ops->mode_fixup)
122 return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
123 mode, adjusted_mode);
124
125 return true;
126}
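intel_dsi_compute_config() gives the panel driver the final word via the mode_fixup hook of intel_dsi_dev_ops (declared in intel_dsi.h below). A hypothetical sub-encoder implementation, purely illustrative since the device table in this patch is still empty:

	/* hypothetical panel driver hook: accept the mode as-is, since
	 * intel_fixed_panel_mode() already copied the fixed mode into
	 * adjusted_mode above */
	static bool my_panel_mode_fixup(struct intel_dsi_device *dsi,
					const struct drm_display_mode *mode,
					struct drm_display_mode *adjusted_mode)
	{
		return true;
	}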
127
128static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
129{
130 DRM_DEBUG_KMS("\n");
131
132 vlv_enable_dsi_pll(encoder);
133}
134
135static void intel_dsi_pre_enable(struct intel_encoder *encoder)
136{
137 DRM_DEBUG_KMS("\n");
138}
139
140static void intel_dsi_enable(struct intel_encoder *encoder)
141{
142 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
143 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
144 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
145 int pipe = intel_crtc->pipe;
146 u32 temp;
147
148 DRM_DEBUG_KMS("\n");
149
150 temp = I915_READ(MIPI_DEVICE_READY(pipe));
151 if ((temp & DEVICE_READY) == 0) {
152 temp &= ~ULPS_STATE_MASK;
153 I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
154 } else if (temp & ULPS_STATE_MASK) {
155 temp &= ~ULPS_STATE_MASK;
156 I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
157 /*
 158	 * We need to ensure that a minimum of 1 ms elapses
 159	 * before clearing the ULPS exit state.
160 */
161 msleep(2);
162 I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
163 }
164
165 if (is_cmd_mode(intel_dsi))
166 I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
167
168 if (is_vid_mode(intel_dsi)) {
169 msleep(20); /* XXX */
170 dpi_send_cmd(intel_dsi, TURN_ON);
171 msleep(100);
172
173 /* assert ip_tg_enable signal */
174 temp = I915_READ(MIPI_PORT_CTRL(pipe));
175 I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
176 POSTING_READ(MIPI_PORT_CTRL(pipe));
177 }
178
179 intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
180}
181
182static void intel_dsi_disable(struct intel_encoder *encoder)
183{
184 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
185 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
186 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
187 int pipe = intel_crtc->pipe;
188 u32 temp;
189
190 DRM_DEBUG_KMS("\n");
191
192 intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
193
194 if (is_vid_mode(intel_dsi)) {
195 dpi_send_cmd(intel_dsi, SHUTDOWN);
196 msleep(10);
197
198 /* de-assert ip_tg_enable signal */
199 temp = I915_READ(MIPI_PORT_CTRL(pipe));
200 I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
201 POSTING_READ(MIPI_PORT_CTRL(pipe));
202
203 msleep(2);
204 }
205
206 temp = I915_READ(MIPI_DEVICE_READY(pipe));
207 if (temp & DEVICE_READY) {
208 temp &= ~DEVICE_READY;
209 temp &= ~ULPS_STATE_MASK;
210 I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
211 }
212}
213
214static void intel_dsi_post_disable(struct intel_encoder *encoder)
215{
216 DRM_DEBUG_KMS("\n");
217
218 vlv_disable_dsi_pll(encoder);
219}
220
221static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
222 enum pipe *pipe)
223{
224 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
225 u32 port, func;
226 enum pipe p;
227
228 DRM_DEBUG_KMS("\n");
229
230 /* XXX: this only works for one DSI output */
231 for (p = PIPE_A; p <= PIPE_B; p++) {
232 port = I915_READ(MIPI_PORT_CTRL(p));
233 func = I915_READ(MIPI_DSI_FUNC_PRG(p));
234
235 if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
236 if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
237 *pipe = p;
238 return true;
239 }
240 }
241 }
242
243 return false;
244}
245
246static void intel_dsi_get_config(struct intel_encoder *encoder,
247 struct intel_crtc_config *pipe_config)
248{
249 DRM_DEBUG_KMS("\n");
250
251 /* XXX: read flags, set to adjusted_mode */
252}
253
254static int intel_dsi_mode_valid(struct drm_connector *connector,
255 struct drm_display_mode *mode)
256{
257 struct intel_connector *intel_connector = to_intel_connector(connector);
258 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
259 struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
260
261 DRM_DEBUG_KMS("\n");
262
263 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
264 DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
265 return MODE_NO_DBLESCAN;
266 }
267
268 if (fixed_mode) {
269 if (mode->hdisplay > fixed_mode->hdisplay)
270 return MODE_PANEL;
271 if (mode->vdisplay > fixed_mode->vdisplay)
272 return MODE_PANEL;
273 }
274
275 return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
276}
277
278/* return txclkesc cycles in terms of divider and duration in us */
279static u16 txclkesc(u32 divider, unsigned int us)
280{
281 switch (divider) {
282 case ESCAPE_CLOCK_DIVIDER_1:
283 default:
284 return 20 * us;
285 case ESCAPE_CLOCK_DIVIDER_2:
286 return 10 * us;
287 case ESCAPE_CLOCK_DIVIDER_4:
288 return 5 * us;
289 }
290}
291
292/* return pixels in terms of txbyteclkhs */
293static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
294{
295 return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
296}
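To make the unit helpers concrete: with ESCAPE_CLOCK_DIVIDER_1 the escape clock runs at 20 MHz, so txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100) returns 20 * 100 = 2000 cycles. A worked txbyteclkhs() example for a hypothetical 1920-pixel active line at 24 bpp over 4 lanes:

	/* txbyteclkhs(1920, 24, 4):
	 *   bytes     = DIV_ROUND_UP(1920 * 24, 8) = 5760
	 *   byte clks = DIV_ROUND_UP(5760, 4)      = 1440
	 * i.e. 1440 high speed byte clocks to push one line over 4 lanes
	 */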
297
298static void set_dsi_timings(struct drm_encoder *encoder,
299 const struct drm_display_mode *mode)
300{
301 struct drm_device *dev = encoder->dev;
302 struct drm_i915_private *dev_priv = dev->dev_private;
303 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
304 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
305 int pipe = intel_crtc->pipe;
306 unsigned int bpp = intel_crtc->config.pipe_bpp;
307 unsigned int lane_count = intel_dsi->lane_count;
308
309 u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
310
311 hactive = mode->hdisplay;
312 hfp = mode->hsync_start - mode->hdisplay;
313 hsync = mode->hsync_end - mode->hsync_start;
314 hbp = mode->htotal - mode->hsync_end;
315
316 vfp = mode->vsync_start - mode->vdisplay;
317 vsync = mode->vsync_end - mode->vsync_start;
318 vbp = mode->vtotal - mode->vsync_end;
319
320 /* horizontal values are in terms of high speed byte clock */
321 hactive = txbyteclkhs(hactive, bpp, lane_count);
322 hfp = txbyteclkhs(hfp, bpp, lane_count);
323 hsync = txbyteclkhs(hsync, bpp, lane_count);
324 hbp = txbyteclkhs(hbp, bpp, lane_count);
325
326 I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
327 I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
328
 329	/* Meaningful only for video mode with non-burst sync pulses; can be
 330	 * zero for non-burst sync events and for burst modes */
331 I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
332 I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
333
334 /* vertical values are in terms of lines */
335 I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
336 I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
337 I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
338}
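As a hypothetical sanity check of the conversions above, take a 1920x1080 mode with hsync_start 1944, hsync_end 1960, htotal 2000 at 24 bpp over 4 lanes:

	/* hfp   = 1944 - 1920 = 24 px -> txbyteclkhs(24, 24, 4) = 18
	 * hsync = 1960 - 1944 = 16 px -> txbyteclkhs(16, 24, 4) = 12
	 * hbp   = 2000 - 1960 = 40 px -> txbyteclkhs(40, 24, 4) = 30
	 * vertical values are written in lines, unconverted
	 */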
339
340static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
341{
342 struct drm_encoder *encoder = &intel_encoder->base;
343 struct drm_device *dev = encoder->dev;
344 struct drm_i915_private *dev_priv = dev->dev_private;
345 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
346 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
347 struct drm_display_mode *adjusted_mode =
348 &intel_crtc->config.adjusted_mode;
349 int pipe = intel_crtc->pipe;
350 unsigned int bpp = intel_crtc->config.pipe_bpp;
351 u32 val, tmp;
352
353 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
354
355 /* Update the DSI PLL */
356 vlv_enable_dsi_pll(intel_encoder);
357
358 /* XXX: Location of the call */
359 band_gap_wa(dev_priv);
360
 361	/* Escape clock divider, 20 MHz, shared between A and C. Device ready
 362	 * must be off when programming this! txclkesc? */
363 tmp = I915_READ(MIPI_CTRL(0));
364 tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
365 I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
366
367 /* read request priority is per pipe */
368 tmp = I915_READ(MIPI_CTRL(pipe));
369 tmp &= ~READ_REQUEST_PRIORITY_MASK;
370 I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
371
372 /* XXX: why here, why like this? handling in irq handler?! */
373 I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
374 I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
375
376 I915_WRITE(MIPI_DPHY_PARAM(pipe),
377 0x3c << EXIT_ZERO_COUNT_SHIFT |
378 0x1f << TRAIL_COUNT_SHIFT |
379 0xc5 << CLK_ZERO_COUNT_SHIFT |
380 0x1f << PREPARE_COUNT_SHIFT);
381
382 I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
383 adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
384 adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
385
386 set_dsi_timings(encoder, adjusted_mode);
387
388 val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
389 if (is_cmd_mode(intel_dsi)) {
390 val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
391 val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
392 } else {
393 val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
394
395 /* XXX: cross-check bpp vs. pixel format? */
396 val |= intel_dsi->pixel_format;
397 }
398 I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
399
 400	/* Timeouts for recovery: on the order of one frame. If the counter
 401	 * expires, the link sends EOT and enters stop state. */
 402
 403	/*
 404	 * In burst mode, use a value greater than one DPI line time in byte
 405	 * clocks (txbyteclkhs); programming 1 more than that value is
 406	 * recommended so the timer can actually expire.
 407	 *
 408	 * In non-burst mode, use a value greater than one DPI frame time in
 409	 * byte clocks (txbyteclkhs); again, 1 more than that value is
 410	 * recommended.
 411	 *
 412	 * In DBI only mode, use a value greater than one DBI frame time in
 413	 * byte clocks (txbyteclkhs); again, 1 more than that value is
 414	 * recommended.
 415	 */
416
417 if (is_vid_mode(intel_dsi) &&
418 intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
419 I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
420 txbyteclkhs(adjusted_mode->htotal, bpp,
421 intel_dsi->lane_count) + 1);
422 } else {
423 I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
424 txbyteclkhs(adjusted_mode->vtotal *
425 adjusted_mode->htotal,
426 bpp, intel_dsi->lane_count) + 1);
427 }
428 I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */
429 I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */
430 I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */
431
432 /* dphy stuff */
433
434 /* in terms of low power clock */
435 I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));
436
437 /* recovery disables */
438 I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);
439
 440	/* In terms of txbyteclkhs: the actual high-to-low switch time plus
 441	 * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
442 *
443 * XXX: write MIPI_STOP_STATE_STALL?
444 */
445 I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46);
446
 447	/* XXX: low power clock equivalence in terms of the byte clock, i.e.
 448	 * the number of byte clocks occupied by one low power clock; based on
 449	 * txbyteclkhs and txclkesc. txclkesc time / txbyteclk time * (105 +
 450	 * MIPI_STOP_STATE_STALL) / 105.???
 451	 */
452 I915_WRITE(MIPI_LP_BYTECLK(pipe), 4);
453
 454	/* The bandwidth needed to transmit 16 long packets, each carrying the
 455	 * 252 bytes of a DCS write-memory command, is programmed in this
 456	 * register in terms of byte clocks. The time taken to transmit those
 457	 * 16 long packets in a DSI stream varies with the DSI transfer rate
 458	 * and the number of lanes configured. */
459 I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820);
460
461 I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
462 0xa << LP_HS_SSW_CNT_SHIFT |
463 0x14 << HS_LP_PWR_SW_CNT_SHIFT);
464
465 if (is_vid_mode(intel_dsi))
466 I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
467 intel_dsi->video_mode_format);
468}
469
470static enum drm_connector_status
471intel_dsi_detect(struct drm_connector *connector, bool force)
472{
473 struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
474 DRM_DEBUG_KMS("\n");
475 return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
476}
477
478static int intel_dsi_get_modes(struct drm_connector *connector)
479{
480 struct intel_connector *intel_connector = to_intel_connector(connector);
481 struct drm_display_mode *mode;
482
483 DRM_DEBUG_KMS("\n");
484
485 if (!intel_connector->panel.fixed_mode) {
486 DRM_DEBUG_KMS("no fixed mode\n");
487 return 0;
488 }
489
490 mode = drm_mode_duplicate(connector->dev,
491 intel_connector->panel.fixed_mode);
492 if (!mode) {
493 DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
494 return 0;
495 }
496
497 drm_mode_probed_add(connector, mode);
498 return 1;
499}
500
501static void intel_dsi_destroy(struct drm_connector *connector)
502{
503 struct intel_connector *intel_connector = to_intel_connector(connector);
504
505 DRM_DEBUG_KMS("\n");
506 intel_panel_fini(&intel_connector->panel);
507 drm_connector_cleanup(connector);
508 kfree(connector);
509}
510
511static const struct drm_encoder_funcs intel_dsi_funcs = {
512 .destroy = intel_encoder_destroy,
513};
514
515static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
516 .get_modes = intel_dsi_get_modes,
517 .mode_valid = intel_dsi_mode_valid,
518 .best_encoder = intel_best_encoder,
519};
520
521static const struct drm_connector_funcs intel_dsi_connector_funcs = {
522 .dpms = intel_connector_dpms,
523 .detect = intel_dsi_detect,
524 .destroy = intel_dsi_destroy,
525 .fill_modes = drm_helper_probe_single_connector_modes,
526};
527
528bool intel_dsi_init(struct drm_device *dev)
529{
530 struct intel_dsi *intel_dsi;
531 struct intel_encoder *intel_encoder;
532 struct drm_encoder *encoder;
533 struct intel_connector *intel_connector;
534 struct drm_connector *connector;
535 struct drm_display_mode *fixed_mode = NULL;
536 const struct intel_dsi_device *dsi;
537 unsigned int i;
538
539 DRM_DEBUG_KMS("\n");
540
541 intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
542 if (!intel_dsi)
543 return false;
544
545 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
546 if (!intel_connector) {
547 kfree(intel_dsi);
548 return false;
549 }
550
551 intel_encoder = &intel_dsi->base;
552 encoder = &intel_encoder->base;
553 intel_dsi->attached_connector = intel_connector;
554
555 connector = &intel_connector->base;
556
557 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
558
559 /* XXX: very likely not all of these are needed */
560 intel_encoder->hot_plug = intel_dsi_hot_plug;
561 intel_encoder->compute_config = intel_dsi_compute_config;
562 intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
563 intel_encoder->pre_enable = intel_dsi_pre_enable;
564 intel_encoder->enable = intel_dsi_enable;
565 intel_encoder->mode_set = intel_dsi_mode_set;
566 intel_encoder->disable = intel_dsi_disable;
567 intel_encoder->post_disable = intel_dsi_post_disable;
568 intel_encoder->get_hw_state = intel_dsi_get_hw_state;
569 intel_encoder->get_config = intel_dsi_get_config;
570
571 intel_connector->get_hw_state = intel_connector_get_hw_state;
572
573 for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
574 dsi = &intel_dsi_devices[i];
575 intel_dsi->dev = *dsi;
576
577 if (dsi->dev_ops->init(&intel_dsi->dev))
578 break;
579 }
580
581 if (i == ARRAY_SIZE(intel_dsi_devices)) {
582 DRM_DEBUG_KMS("no device found\n");
583 goto err;
584 }
585
586 intel_encoder->type = INTEL_OUTPUT_DSI;
587 intel_encoder->crtc_mask = (1 << 0); /* XXX */
588
589 intel_encoder->cloneable = false;
590 drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
591 DRM_MODE_CONNECTOR_DSI);
592
593 drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
594
595 connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
596 connector->interlace_allowed = false;
597 connector->doublescan_allowed = false;
598
599 intel_connector_attach_encoder(intel_connector, intel_encoder);
600
601 drm_sysfs_connector_add(connector);
602
603 fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
604 if (!fixed_mode) {
605 DRM_DEBUG_KMS("no fixed mode\n");
606 goto err;
607 }
608
609 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
610 intel_panel_init(&intel_connector->panel, fixed_mode);
611
612 return true;
613
614err:
615 drm_encoder_cleanup(&intel_encoder->base);
616 kfree(intel_dsi);
617 kfree(intel_connector);
618
619 return false;
620}
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
new file mode 100644
index 000000000000..c7765f33d524
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -0,0 +1,102 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef _INTEL_DSI_H
25#define _INTEL_DSI_H
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc.h>
29#include "intel_drv.h"
30
31struct intel_dsi_device {
32 unsigned int panel_id;
33 const char *name;
34 int type;
35 const struct intel_dsi_dev_ops *dev_ops;
36 void *dev_priv;
37};
38
39struct intel_dsi_dev_ops {
40 bool (*init)(struct intel_dsi_device *dsi);
41
42 /* This callback must be able to assume DSI commands can be sent */
43 void (*enable)(struct intel_dsi_device *dsi);
44
45 /* This callback must be able to assume DSI commands can be sent */
46 void (*disable)(struct intel_dsi_device *dsi);
47
48 int (*mode_valid)(struct intel_dsi_device *dsi,
49 struct drm_display_mode *mode);
50
51 bool (*mode_fixup)(struct intel_dsi_device *dsi,
52 const struct drm_display_mode *mode,
53 struct drm_display_mode *adjusted_mode);
54
55 void (*mode_set)(struct intel_dsi_device *dsi,
56 struct drm_display_mode *mode,
57 struct drm_display_mode *adjusted_mode);
58
59 enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
60
61 bool (*get_hw_state)(struct intel_dsi_device *dev);
62
63 struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
64
65 void (*destroy) (struct intel_dsi_device *dsi);
66};
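A sketch of how a future panel driver would plug into these structs and the (currently empty) intel_dsi_devices[] table in intel_dsi.c; every name below is hypothetical:

	static const struct intel_dsi_dev_ops my_panel_ops = {
		.init		= my_panel_init,	/* hypothetical callbacks */
		.enable		= my_panel_enable,
		.disable	= my_panel_disable,
		.mode_valid	= my_panel_mode_valid,
		.detect		= my_panel_detect,
		.get_modes	= my_panel_get_modes,
	};

	/* candidate entry for intel_dsi_devices[] in intel_dsi.c */
	static const struct intel_dsi_device my_panel = {
		.panel_id	= 0x0001,		/* hypothetical id */
		.name		= "my-panel",
		.type		= INTEL_DSI_VIDEO_MODE,
		.dev_ops	= &my_panel_ops,
	};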
67
68struct intel_dsi {
69 struct intel_encoder base;
70
71 struct intel_dsi_device dev;
72
73 struct intel_connector *attached_connector;
74
75 /* if true, use HS mode, otherwise LP */
76 bool hs;
77
78 /* virtual channel */
79 int channel;
80
81 /* number of DSI lanes */
82 unsigned int lane_count;
83
84 /* video mode pixel format for MIPI_DSI_FUNC_PRG register */
85 u32 pixel_format;
86
87 /* video mode format for MIPI_VIDEO_MODE_FORMAT register */
88 u32 video_mode_format;
89
90 /* eot for MIPI_EOT_DISABLE register */
91 u32 eot_disable;
92};
93
94static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
95{
96 return container_of(encoder, struct intel_dsi, base.base);
97}
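enc_to_intel_dsi() is the usual container_of() downcast; any encoder hook that receives a bare drm_encoder recovers the wrapper the same way, e.g.:

	/* inside a drm_encoder callback */
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);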
98
99extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
100extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
101
102#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
new file mode 100644
index 000000000000..7c40f981d2c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -0,0 +1,427 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Jani Nikula <jani.nikula@intel.com>
24 */
25
26#include <linux/export.h>
27#include <drm/drmP.h>
28#include <drm/drm_crtc.h>
29#include <video/mipi_display.h>
30#include "i915_drv.h"
31#include "intel_drv.h"
32#include "intel_dsi.h"
33#include "intel_dsi_cmd.h"
34
35/*
36 * XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
37 * MIPI_COMMAND_ADDRESS registers.
38 *
39 * Apparently these registers provide a MIPI adapter level way to send (lots of)
40 * commands and data to the receiver, without having to write the commands and
41 * data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
42 *
43 * Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
44 * MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
45 * framebuffer in command mode displays) these are just an optimization that can
46 * come later.
47 *
48 * For memory writes, these should probably be used for performance.
49 */
50
51static void print_stat(struct intel_dsi *intel_dsi)
52{
53 struct drm_encoder *encoder = &intel_dsi->base.base;
54 struct drm_device *dev = encoder->dev;
55 struct drm_i915_private *dev_priv = dev->dev_private;
56 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
57 enum pipe pipe = intel_crtc->pipe;
58 u32 val;
59
60 val = I915_READ(MIPI_INTR_STAT(pipe));
61
62#define STAT_BIT(val, bit) ((val) & (bit) ? " " #bit : "")
63 DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
64 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
65 "\n", pipe, val,
66 STAT_BIT(val, TEARING_EFFECT),
67 STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
68 STAT_BIT(val, GEN_READ_DATA_AVAIL),
69 STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
70 STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
71 STAT_BIT(val, RX_PROT_VIOLATION),
72 STAT_BIT(val, RX_INVALID_TX_LENGTH),
73 STAT_BIT(val, ACK_WITH_NO_ERROR),
74 STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
75 STAT_BIT(val, LP_RX_TIMEOUT),
76 STAT_BIT(val, HS_TX_TIMEOUT),
77 STAT_BIT(val, DPI_FIFO_UNDERRUN),
78 STAT_BIT(val, LOW_CONTENTION),
79 STAT_BIT(val, HIGH_CONTENTION),
80 STAT_BIT(val, TXDSI_VC_ID_INVALID),
81 STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
82 STAT_BIT(val, TXCHECKSUM_ERROR),
83 STAT_BIT(val, TXECC_MULTIBIT_ERROR),
84 STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
85 STAT_BIT(val, TXFALSE_CONTROL_ERROR),
86 STAT_BIT(val, RXDSI_VC_ID_INVALID),
87 STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
88 STAT_BIT(val, RXCHECKSUM_ERROR),
89 STAT_BIT(val, RXECC_MULTIBIT_ERROR),
90 STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
91 STAT_BIT(val, RXFALSE_CONTROL_ERROR),
92 STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
93 STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
94 STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
95 STAT_BIT(val, RXEOT_SYNC_ERROR),
96 STAT_BIT(val, RXSOT_SYNC_ERROR),
97 STAT_BIT(val, RXSOT_ERROR));
98#undef STAT_BIT
99}
100
101enum dsi_type {
102 DSI_DCS,
103 DSI_GENERIC,
104};
105
106/* enable or disable command mode hs transmissions */
107void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
108{
109 struct drm_encoder *encoder = &intel_dsi->base.base;
110 struct drm_device *dev = encoder->dev;
111 struct drm_i915_private *dev_priv = dev->dev_private;
112 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
113 enum pipe pipe = intel_crtc->pipe;
114 u32 temp;
115 u32 mask = DBI_FIFO_EMPTY;
116
117 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
118 DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
119
120	temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
121	temp &= ~DBI_HS_LP_MODE_MASK;
122	I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), temp | (enable ? DBI_HS_MODE : DBI_LP_MODE));
123
124 intel_dsi->hs = enable;
125}
126
127static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
128 u8 data_type, u16 data)
129{
130 struct drm_encoder *encoder = &intel_dsi->base.base;
131 struct drm_device *dev = encoder->dev;
132 struct drm_i915_private *dev_priv = dev->dev_private;
133 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
134 enum pipe pipe = intel_crtc->pipe;
135 u32 ctrl_reg;
136 u32 ctrl;
137 u32 mask;
138
139 DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
140 channel, data_type, data);
141
142 if (intel_dsi->hs) {
143 ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
144 mask = HS_CTRL_FIFO_FULL;
145 } else {
146 ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
147 mask = LP_CTRL_FIFO_FULL;
148 }
149
150 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
151 DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
152 print_stat(intel_dsi);
153 }
154
155 /*
156 * Note: This function is also used for long packets, with length passed
157 * as data, since SHORT_PACKET_PARAM_SHIFT ==
158 * LONG_PACKET_WORD_COUNT_SHIFT.
159 */
160 ctrl = data << SHORT_PACKET_PARAM_SHIFT |
161 channel << VIRTUAL_CHANNEL_SHIFT |
162 data_type << DATA_TYPE_SHIFT;
163
164 I915_WRITE(ctrl_reg, ctrl);
165
166 return 0;
167}
168
169static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
170 u8 data_type, const u8 *data, int len)
171{
172 struct drm_encoder *encoder = &intel_dsi->base.base;
173 struct drm_device *dev = encoder->dev;
174 struct drm_i915_private *dev_priv = dev->dev_private;
175 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
176 enum pipe pipe = intel_crtc->pipe;
177 u32 data_reg;
178 int i, j, n;
179 u32 mask;
180
181 DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
182 channel, data_type, len);
183
184 if (intel_dsi->hs) {
185 data_reg = MIPI_HS_GEN_DATA(pipe);
186 mask = HS_DATA_FIFO_FULL;
187 } else {
188 data_reg = MIPI_LP_GEN_DATA(pipe);
189 mask = LP_DATA_FIFO_FULL;
190 }
191
192 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
193 DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
194
195 for (i = 0; i < len; i += n) {
196 u32 val = 0;
197 n = min_t(int, len - i, 4);
198
199 for (j = 0; j < n; j++)
200 val |= *data++ << 8 * j;
201
202 I915_WRITE(data_reg, val);
203 /* XXX: check for data fifo full, once that is set, write 4
204 * dwords, then wait for not set, then continue. */
205 }
206
207 return dsi_vc_send_short(intel_dsi, channel, data_type, len);
208}
209
210static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
211 int channel, const u8 *data, int len,
212 enum dsi_type type)
213{
214 int ret;
215
216 if (len == 0) {
217		BUG_ON(type == DSI_DCS);	/* DCS always carries the command byte */
218 ret = dsi_vc_send_short(intel_dsi, channel,
219 MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
220 0);
221 } else if (len == 1) {
222 ret = dsi_vc_send_short(intel_dsi, channel,
223 type == DSI_GENERIC ?
224 MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
225 MIPI_DSI_DCS_SHORT_WRITE, data[0]);
226 } else if (len == 2) {
227 ret = dsi_vc_send_short(intel_dsi, channel,
228 type == DSI_GENERIC ?
229 MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
230 MIPI_DSI_DCS_SHORT_WRITE_PARAM,
231 (data[1] << 8) | data[0]);
232 } else {
233 ret = dsi_vc_send_long(intel_dsi, channel,
234 type == DSI_GENERIC ?
235 MIPI_DSI_GENERIC_LONG_WRITE :
236 MIPI_DSI_DCS_LONG_WRITE, data, len);
237 }
238
239 return ret;
240}
241
242int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
243 const u8 *data, int len)
244{
245 return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
246}
247
248int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
249 const u8 *data, int len)
250{
251 return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
252}
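
The length-based dispatch in dsi_vc_write_common maps straight onto the MIPI
packet types. A usage sketch (payloads chosen only for illustration; the
MIPI_DCS_* opcodes come from <video/mipi_display.h>):

	static void example_dcs_writes(struct intel_dsi *intel_dsi)
	{
		u8 on = MIPI_DCS_SET_DISPLAY_ON;	/* len 1: DCS short write */
		u8 gamma[2] = { MIPI_DCS_SET_GAMMA_CURVE,	/* len 2: short write, 1 param */
				0x01 };
		u8 cols[5] = { MIPI_DCS_SET_COLUMN_ADDRESS,	/* len > 2: long write */
			       0x00, 0x00, 0x01, 0xdf };

		dsi_vc_dcs_write(intel_dsi, 0, &on, 1);
		dsi_vc_dcs_write(intel_dsi, 0, gamma, 2);
		dsi_vc_dcs_write(intel_dsi, 0, cols, 5);
	}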
253
254static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
255 int channel, u8 dcs_cmd)
256{
257 return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
258 dcs_cmd);
259}
260
261static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
262 int channel, u8 *reqdata,
263 int reqlen)
264{
265 u16 data;
266 u8 data_type;
267
268 switch (reqlen) {
269 case 0:
270 data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
271 data = 0;
272 break;
273 case 1:
274 data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
275 data = reqdata[0];
276 break;
277 case 2:
278 data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
279 data = (reqdata[1] << 8) | reqdata[0];
280 break;
281 default:
282 BUG();
283 }
284
285 return dsi_vc_send_short(intel_dsi, channel, data_type, data);
286}
287
288static int dsi_read_data_return(struct intel_dsi *intel_dsi,
289 u8 *buf, int buflen)
290{
291 struct drm_encoder *encoder = &intel_dsi->base.base;
292 struct drm_device *dev = encoder->dev;
293 struct drm_i915_private *dev_priv = dev->dev_private;
294 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
295 enum pipe pipe = intel_crtc->pipe;
296 int i, len = 0;
297 u32 data_reg, val;
298
299 if (intel_dsi->hs) {
300 data_reg = MIPI_HS_GEN_DATA(pipe);
301 } else {
302 data_reg = MIPI_LP_GEN_DATA(pipe);
303 }
304
305 while (len < buflen) {
306 val = I915_READ(data_reg);
307 for (i = 0; i < 4 && len < buflen; i++, len++)
308 buf[len] = val >> 8 * i;
309 }
310
311 return len;
312}
313
314int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
315 u8 *buf, int buflen)
316{
317 struct drm_encoder *encoder = &intel_dsi->base.base;
318 struct drm_device *dev = encoder->dev;
319 struct drm_i915_private *dev_priv = dev->dev_private;
320 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
321 enum pipe pipe = intel_crtc->pipe;
322 u32 mask;
323 int ret;
324
325 /*
326 * XXX: should issue multiple read requests and reads if request is
327 * longer than MIPI_MAX_RETURN_PKT_SIZE
328 */
329
330 I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
331
332 ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
333 if (ret)
334 return ret;
335
336 mask = GEN_READ_DATA_AVAIL;
337 if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
338 DRM_ERROR("Timeout waiting for read data.\n");
339
340 ret = dsi_read_data_return(intel_dsi, buf, buflen);
341 if (ret < 0)
342 return ret;
343
344 if (ret != buflen)
345 return -EIO;
346
347 return 0;
348}
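
A one-byte read built on the above, e.g. polling the panel's power mode;
a sketch assuming virtual channel 0, with the opcode from
<video/mipi_display.h> and error handling elided:

	static int example_get_power_mode(struct intel_dsi *intel_dsi, u8 *mode)
	{
		return dsi_vc_dcs_read(intel_dsi, 0, MIPI_DCS_GET_POWER_MODE,
				       mode, 1);
	}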
349
350int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
351 u8 *reqdata, int reqlen, u8 *buf, int buflen)
352{
353 struct drm_encoder *encoder = &intel_dsi->base.base;
354 struct drm_device *dev = encoder->dev;
355 struct drm_i915_private *dev_priv = dev->dev_private;
356 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
357 enum pipe pipe = intel_crtc->pipe;
358 u32 mask;
359 int ret;
360
361 /*
362 * XXX: should issue multiple read requests and reads if request is
363 * longer than MIPI_MAX_RETURN_PKT_SIZE
364 */
365
366 I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
367
368 ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
369 reqlen);
370 if (ret)
371 return ret;
372
373 mask = GEN_READ_DATA_AVAIL;
374 if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
375 DRM_ERROR("Timeout waiting for read data.\n");
376
377 ret = dsi_read_data_return(intel_dsi, buf, buflen);
378 if (ret < 0)
379 return ret;
380
381 if (ret != buflen)
382 return -EIO;
383
384 return 0;
385}
386
387/*
388 * send a video mode command
389 *
390 * XXX: commands with data in MIPI_DPI_DATA?
391 */
392int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
393{
394 struct drm_encoder *encoder = &intel_dsi->base.base;
395 struct drm_device *dev = encoder->dev;
396 struct drm_i915_private *dev_priv = dev->dev_private;
397 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
398 enum pipe pipe = intel_crtc->pipe;
399 u32 mask;
400
401 /* XXX: pipe, hs */
402 if (intel_dsi->hs)
403 cmd &= ~DPI_LP_MODE;
404 else
405 cmd |= DPI_LP_MODE;
406
407 /* DPI virtual channel?! */
408
409 mask = DPI_FIFO_EMPTY;
410 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
411 DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
412
413	/* clear any stale SPL_PKT_SENT_INTERRUPT status before sending */
414 I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
415
416 /* XXX: old code skips write if control unchanged */
417 if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
418 DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
419
420 I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
421
422 mask = SPL_PKT_SENT_INTERRUPT;
423 if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
424 DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
425
426 return 0;
427}
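
Callers would use this to start and stop the video stream; a sketch
assuming the TURN_ON/SHUTDOWN special packet bit names from the
MIPI_DPI_CONTROL register definitions elsewhere in this series:

	static void example_panel_stream(struct intel_dsi *intel_dsi, bool on)
	{
		dpi_send_cmd(intel_dsi, on ? TURN_ON : SHUTDOWN);
	}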
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
new file mode 100644
index 000000000000..54c8a234a2e0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -0,0 +1,109 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Jani Nikula <jani.nikula@intel.com>
24 */
25
26#ifndef _INTEL_DSI_CMD_H
27#define _INTEL_DSI_CMD_H
28
29#include <drm/drmP.h>
30#include <drm/drm_crtc.h>
31#include <video/mipi_display.h>
32#include "i915_drv.h"
33#include "intel_drv.h"
34#include "intel_dsi.h"
35
36void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
37
38int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
39 const u8 *data, int len);
40
41int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
42 const u8 *data, int len);
43
44int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
45 u8 *buf, int buflen);
46
47int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
48 u8 *reqdata, int reqlen, u8 *buf, int buflen);
49
50int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd);
51
52/* XXX: questionable write helpers */
53static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
54 int channel, u8 dcs_cmd)
55{
56 return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
57}
58
59static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
60 int channel, u8 dcs_cmd, u8 param)
61{
62 u8 buf[2] = { dcs_cmd, param };
63 return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
64}
65
66static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
67 int channel)
68{
69 return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
70}
71
72static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
73 int channel, u8 param)
74{
75 return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
76}
77
78static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
79 int channel, u8 param1, u8 param2)
80{
81 u8 buf[2] = { param1, param2 };
82 return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
83}
84
85/* XXX: questionable read helpers */
86static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
87 int channel, u8 *buf, int buflen)
88{
89 return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
90}
91
92static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
93 int channel, u8 param, u8 *buf,
94 int buflen)
95{
96 return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
97}
98
99static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
100 int channel, u8 param1, u8 param2,
101 u8 *buf, int buflen)
102{
103 u8 req[2] = { param1, param2 };
104
105 return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
106}
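
A usage sketch for the helpers above, e.g. waking a panel (virtual channel
0 assumed; DCS opcodes from <video/mipi_display.h>):

	static void example_panel_wake(struct intel_dsi *intel_dsi)
	{
		dsi_vc_dcs_write_0(intel_dsi, 0, MIPI_DCS_EXIT_SLEEP_MODE);
		dsi_vc_dcs_write_0(intel_dsi, 0, MIPI_DCS_SET_DISPLAY_ON);
	}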
107
108
109#endif /* _INTEL_DSI_CMD_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
new file mode 100644
index 000000000000..44279b2ade88
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -0,0 +1,317 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Shobhit Kumar <shobhit.kumar@intel.com>
25 * Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
26 */
27
28#include <linux/kernel.h>
29#include "intel_drv.h"
30#include "i915_drv.h"
31#include "intel_dsi.h"
32
33#define DSI_HSS_PACKET_SIZE 4
34#define DSI_HSE_PACKET_SIZE 4
35#define DSI_HSA_PACKET_EXTRA_SIZE 6
36#define DSI_HBP_PACKET_EXTRA_SIZE 6
37#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6
38#define DSI_HFP_PACKET_EXTRA_SIZE 6
39#define DSI_EOTP_PACKET_SIZE 4
40
41struct dsi_mnp {
42 u32 dsi_pll_ctrl;
43 u32 dsi_pll_div;
44};
45
46static const u32 lfsr_converts[] = {
47 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
48 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
49 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
50 71, 35 /* 91 - 92 */
51};
52
53static u32 dsi_rr_formula(const struct drm_display_mode *mode,
54 int pixel_format, int video_mode_format,
55 int lane_count, bool eotp)
56{
57 u32 bpp;
58 u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
59 u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
60 u32 bytes_per_line, bytes_per_frame;
61 u32 num_frames;
62 u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
63 u32 dsi_bit_clock_hz;
64 u32 dsi_clk;
65
66 switch (pixel_format) {
67 default:
68 case VID_MODE_FORMAT_RGB888:
69 case VID_MODE_FORMAT_RGB666_LOOSE:
70 bpp = 24;
71 break;
72 case VID_MODE_FORMAT_RGB666:
73 bpp = 18;
74 break;
75 case VID_MODE_FORMAT_RGB565:
76 bpp = 16;
77 break;
78 }
79
80 hactive = mode->hdisplay;
81 vactive = mode->vdisplay;
82 hfp = mode->hsync_start - mode->hdisplay;
83 hsync = mode->hsync_end - mode->hsync_start;
84 hbp = mode->htotal - mode->hsync_end;
85
86 vfp = mode->vsync_start - mode->vdisplay;
87 vsync = mode->vsync_end - mode->vsync_start;
88 vbp = mode->vtotal - mode->vsync_end;
89
90 hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
91 hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
92 hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
93 hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
94
95 bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
96 DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
97 hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
98 hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
99 hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
100
101 /*
102 * XXX: Need to accurately calculate LP to HS transition timeout and add
103 * it to bytes_per_line/bytes_per_frame.
104 */
105
106 if (eotp && video_mode_format == VIDEO_MODE_BURST)
107 bytes_per_line += DSI_EOTP_PACKET_SIZE;
108
109 bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
110 vactive * bytes_per_line + vfp * bytes_per_line;
111
112 if (eotp &&
113 (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
114 video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
115 bytes_per_frame += DSI_EOTP_PACKET_SIZE;
116
117 num_frames = drm_mode_vrefresh(mode);
118 bytes_per_x_frames = num_frames * bytes_per_frame;
119
120 bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
121
122 /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
123 dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
124 dsi_clk = dsi_bit_clock_hz / (1000 * 1000);
125
126 if (eotp && video_mode_format == VIDEO_MODE_BURST)
127 dsi_clk *= 2;
128
129 return dsi_clk;
130}
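
A worked example of the byte accounting above, with numbers chosen only for
illustration: bpp = 24, 4 lanes, no EOTp, and hsync/hbp/hactive/hfp of
10/20/800/30 pixels give payloads of 30/60/2400/90 bytes, so

	bytes_per_line = 4 + 30 + 6 + 4 + 60 + 6 + 2400 + 6 + 90 + 6 = 2612

With a 500-line vtotal at 60 fps, bytes_per_frame * num_frames =
2612 * 500 * 60 = 78,360,000 bytes/s; spread over 4 lanes and multiplied by
8 that is 156,720,000 bits/s per lane, which the final division reports as
dsi_clk = 156 (MHz).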
131
132#ifdef MNP_FROM_TABLE
133
134struct dsi_clock_table {
135 u32 freq;
136 u8 m;
137 u8 p;
138};
139
140static const struct dsi_clock_table dsi_clk_tbl[] = {
141 {300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6},
142 {343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6},
143 {383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5},
144 {401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5},
145 {405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5},
146 {409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5},
147 {413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5},
148 {417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5},
149 {430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5},
150 {470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4},
151 {510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4},
152 {550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3},
153 {590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3},
154 {630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3},
155 {670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3},
156 {710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3},
157 {750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2},
158 {790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2},
159	{1000, 80, 2}, /* dsi clock frequency in MHz */
160};
161
162static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
163{
164 unsigned int i;
165 u8 m;
166 u8 n;
167 u8 p;
168 u32 m_seed;
169
170 if (dsi_clk < 300 || dsi_clk > 1000)
171 return -ECHRNG;
172
173	for (i = 0; i < ARRAY_SIZE(dsi_clk_tbl) - 1; i++) {
174 if (dsi_clk_tbl[i].freq > dsi_clk)
175 break;
176 }
177
178 m = dsi_clk_tbl[i].m;
179 p = dsi_clk_tbl[i].p;
180 m_seed = lfsr_converts[m - 62];
181 n = 1;
182 dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2);
183 dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
184 m_seed << DSI_PLL_M1_DIV_SHIFT;
185
186 return 0;
187}
188
189#else
190
191static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
192{
193 u32 m, n, p;
194 u32 ref_clk;
195 u32 error;
196 u32 tmp_error;
197 u32 target_dsi_clk;
198 u32 calc_dsi_clk;
199 u32 calc_m;
200 u32 calc_p;
201 u32 m_seed;
202
203 if (dsi_clk < 300 || dsi_clk > 1150) {
204 DRM_ERROR("DSI CLK Out of Range\n");
205 return -ECHRNG;
206 }
207
208 ref_clk = 25000;
209 target_dsi_clk = dsi_clk * 1000;
210 error = 0xFFFFFFFF;
211 calc_m = 0;
212 calc_p = 0;
213
214 for (m = 62; m <= 92; m++) {
215 for (p = 2; p <= 6; p++) {
216
217 calc_dsi_clk = (m * ref_clk) / p;
218 if (calc_dsi_clk >= target_dsi_clk) {
219 tmp_error = calc_dsi_clk - target_dsi_clk;
220 if (tmp_error < error) {
221 error = tmp_error;
222 calc_m = m;
223 calc_p = p;
224 }
225 }
226 }
227 }
228
229 m_seed = lfsr_converts[calc_m - 62];
230 n = 1;
231 dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
232 dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
233 m_seed << DSI_PLL_M1_DIV_SHIFT;
234
235 return 0;
236}
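
For illustration, a request of dsi_clk = 500 searches m = 62..92 and
p = 2..6 and settles on m = 80, p = 4, since 80 * 25000 / 4 = 500000 kHz
hits the 500 MHz target exactly; the divider word then carries
lfsr_converts[80 - 62] = 213 as the M1 seed, with P1 encoded as
1 << (DSI_PLL_P1_POST_DIV_SHIFT + 2).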
237
238#endif
239
240/*
241 * XXX: The muxing and gating is hard coded for now. Need to add support for
242 * sharing PLLs with two DSI outputs.
243 */
244static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
245{
246 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
247 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
248 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
249 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
250 int ret;
251 struct dsi_mnp dsi_mnp;
252 u32 dsi_clk;
253
254 dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format,
255 intel_dsi->video_mode_format,
256 intel_dsi->lane_count, !intel_dsi->eot_disable);
257
258 ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
259 if (ret) {
260 DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
261 return;
262 }
263
264 dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
265
266 DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
267 dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
268
269 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
270 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
271 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
272}
273
274void vlv_enable_dsi_pll(struct intel_encoder *encoder)
275{
276 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
277 u32 tmp;
278
279 DRM_DEBUG_KMS("\n");
280
281 mutex_lock(&dev_priv->dpio_lock);
282
283 vlv_configure_dsi_pll(encoder);
284
285 /* wait at least 0.5 us after ungating before enabling VCO */
286 usleep_range(1, 10);
287
288 tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
289 tmp |= DSI_PLL_VCO_EN;
290 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
291
292 mutex_unlock(&dev_priv->dpio_lock);
293
294 if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
295 DRM_ERROR("DSI PLL lock failed\n");
296 return;
297 }
298
299 DRM_DEBUG_KMS("DSI PLL locked\n");
300}
301
302void vlv_disable_dsi_pll(struct intel_encoder *encoder)
303{
304 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
305 u32 tmp;
306
307 DRM_DEBUG_KMS("\n");
308
309 mutex_lock(&dev_priv->dpio_lock);
310
311 tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
312 tmp &= ~DSI_PLL_VCO_EN;
313 tmp |= DSI_PLL_LDO_GATE;
314 vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
315
316 mutex_unlock(&dev_priv->dpio_lock);
317}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7fa7df546c1e..3c7736546856 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -153,6 +153,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
153 flags |= DRM_MODE_FLAG_NVSYNC; 153 flags |= DRM_MODE_FLAG_NVSYNC;
154 154
155 pipe_config->adjusted_mode.flags |= flags; 155 pipe_config->adjusted_mode.flags |= flags;
156
157 pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
156} 158}
157 159
158static void intel_disable_dvo(struct intel_encoder *encoder) 160static void intel_disable_dvo(struct intel_encoder *encoder)
@@ -171,11 +173,16 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
171{ 173{
172 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 174 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
173 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 175 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
176 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
174 u32 dvo_reg = intel_dvo->dev.dvo_reg; 177 u32 dvo_reg = intel_dvo->dev.dvo_reg;
175 u32 temp = I915_READ(dvo_reg); 178 u32 temp = I915_READ(dvo_reg);
176 179
177 I915_WRITE(dvo_reg, temp | DVO_ENABLE); 180 I915_WRITE(dvo_reg, temp | DVO_ENABLE);
178 I915_READ(dvo_reg); 181 I915_READ(dvo_reg);
182 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
183 &crtc->config.requested_mode,
184 &crtc->config.adjusted_mode);
185
179 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); 186 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
180} 187}
181 188
@@ -184,6 +191,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
184{ 191{
185 struct intel_dvo *intel_dvo = intel_attached_dvo(connector); 192 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
186 struct drm_crtc *crtc; 193 struct drm_crtc *crtc;
194 struct intel_crtc_config *config;
187 195
188 /* dvo supports only 2 dpms states. */ 196 /* dvo supports only 2 dpms states. */
189 if (mode != DRM_MODE_DPMS_ON) 197 if (mode != DRM_MODE_DPMS_ON)
@@ -204,10 +212,16 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
204 /* We call connector dpms manually below in case pipe dpms doesn't 212 /* We call connector dpms manually below in case pipe dpms doesn't
205 * change due to cloning. */ 213 * change due to cloning. */
206 if (mode == DRM_MODE_DPMS_ON) { 214 if (mode == DRM_MODE_DPMS_ON) {
215 config = &to_intel_crtc(crtc)->config;
216
207 intel_dvo->base.connectors_active = true; 217 intel_dvo->base.connectors_active = true;
208 218
209 intel_crtc_update_dpms(crtc); 219 intel_crtc_update_dpms(crtc);
210 220
221 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
222 &config->requested_mode,
223 &config->adjusted_mode);
224
211 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); 225 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
212 } else { 226 } else {
213 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); 227 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
@@ -267,11 +281,6 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
267 drm_mode_set_crtcinfo(adjusted_mode, 0); 281 drm_mode_set_crtcinfo(adjusted_mode, 0);
268 } 282 }
269 283
270 if (intel_dvo->dev.dev_ops->mode_fixup)
271 return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
272 &pipe_config->requested_mode,
273 adjusted_mode);
274
275 return true; 284 return true;
276} 285}
277 286
@@ -299,10 +308,6 @@ static void intel_dvo_mode_set(struct intel_encoder *encoder)
299 break; 308 break;
300 } 309 }
301 310
302 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
303 &crtc->config.requested_mode,
304 adjusted_mode);
305
306 /* Save the data order, since I don't know what it should be set to. */ 311 /* Save the data order, since I don't know what it should be set to. */
307 dvo_val = I915_READ(dvo_reg) & 312 dvo_val = I915_READ(dvo_reg) &
308 (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG); 313 (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
@@ -370,7 +375,6 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
370 375
371static void intel_dvo_destroy(struct drm_connector *connector) 376static void intel_dvo_destroy(struct drm_connector *connector)
372{ 377{
373 drm_sysfs_connector_remove(connector);
374 drm_connector_cleanup(connector); 378 drm_connector_cleanup(connector);
375 kfree(connector); 379 kfree(connector);
376} 380}
@@ -451,11 +455,11 @@ void intel_dvo_init(struct drm_device *dev)
451 int i; 455 int i;
452 int encoder_type = DRM_MODE_ENCODER_NONE; 456 int encoder_type = DRM_MODE_ENCODER_NONE;
453 457
454 intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL); 458 intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
455 if (!intel_dvo) 459 if (!intel_dvo)
456 return; 460 return;
457 461
458 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 462 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
459 if (!intel_connector) { 463 if (!intel_connector) {
460 kfree(intel_dvo); 464 kfree(intel_dvo);
461 return; 465 return;
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fbdev.c
index bc2100007b21..895fcb4fbd94 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -78,8 +78,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
78 mode_cmd.width = sizes->surface_width; 78 mode_cmd.width = sizes->surface_width;
79 mode_cmd.height = sizes->surface_height; 79 mode_cmd.height = sizes->surface_height;
80 80
81 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) / 81 mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
82 8), 64); 82 DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
83 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 83 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
84 sizes->surface_depth); 84 sizes->surface_depth);
85 85
@@ -184,6 +184,27 @@ out:
184 return ret; 184 return ret;
185} 185}
186 186
187/** Sets the color ramps on behalf of RandR */
188static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
189 u16 blue, int regno)
190{
191 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
192
193 intel_crtc->lut_r[regno] = red >> 8;
194 intel_crtc->lut_g[regno] = green >> 8;
195 intel_crtc->lut_b[regno] = blue >> 8;
196}
197
198static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
199 u16 *blue, int regno)
200{
201 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
202
203 *red = intel_crtc->lut_r[regno] << 8;
204 *green = intel_crtc->lut_g[regno] << 8;
205 *blue = intel_crtc->lut_b[regno] << 8;
206}
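
These helpers down-convert RandR's 16-bit color components to the legacy
8-bit LUT and back, so precision below 8 bits is dropped. For example
(values illustrative), a set with red = 0xabcd stores lut_r[regno] = 0xab,
and a subsequent get reads back *red = 0xab00.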
207
187static struct drm_fb_helper_funcs intel_fb_helper_funcs = { 208static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
188 .gamma_set = intel_crtc_fb_gamma_set, 209 .gamma_set = intel_crtc_fb_gamma_set,
189 .gamma_get = intel_crtc_fb_gamma_get, 210 .gamma_get = intel_crtc_fb_gamma_get,
@@ -216,7 +237,7 @@ int intel_fbdev_init(struct drm_device *dev)
216 struct drm_i915_private *dev_priv = dev->dev_private; 237 struct drm_i915_private *dev_priv = dev->dev_private;
217 int ret; 238 int ret;
218 239
219 ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); 240 ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
220 if (!ifbdev) 241 if (!ifbdev)
221 return -ENOMEM; 242 return -ENOMEM;
222 243
@@ -225,7 +246,7 @@ int intel_fbdev_init(struct drm_device *dev)
225 246
226 ret = drm_fb_helper_init(dev, &ifbdev->helper, 247 ret = drm_fb_helper_init(dev, &ifbdev->helper,
227 INTEL_INFO(dev)->num_pipes, 248 INTEL_INFO(dev)->num_pipes,
228 INTELFB_CONN_LIMIT); 249 4);
229 if (ret) { 250 if (ret) {
230 kfree(ifbdev); 251 kfree(ifbdev);
231 return ret; 252 return ret;
@@ -278,13 +299,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
278 299
279MODULE_LICENSE("GPL and additional rights"); 300MODULE_LICENSE("GPL and additional rights");
280 301
281void intel_fb_output_poll_changed(struct drm_device *dev) 302void intel_fbdev_output_poll_changed(struct drm_device *dev)
282{ 303{
283 struct drm_i915_private *dev_priv = dev->dev_private; 304 struct drm_i915_private *dev_priv = dev->dev_private;
284 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); 305 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
285} 306}
286 307
287void intel_fb_restore_mode(struct drm_device *dev) 308void intel_fbdev_restore_mode(struct drm_device *dev)
288{ 309{
289 int ret; 310 int ret;
290 struct drm_i915_private *dev_priv = dev->dev_private; 311 struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4148cc85bf7f..03f9ca70530c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -713,6 +713,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
713 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 713 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
714 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 714 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
715 u32 tmp, flags = 0; 715 u32 tmp, flags = 0;
716 int dotclock;
716 717
717 tmp = I915_READ(intel_hdmi->hdmi_reg); 718 tmp = I915_READ(intel_hdmi->hdmi_reg);
718 719
@@ -727,6 +728,16 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
727 flags |= DRM_MODE_FLAG_NVSYNC; 728 flags |= DRM_MODE_FLAG_NVSYNC;
728 729
729 pipe_config->adjusted_mode.flags |= flags; 730 pipe_config->adjusted_mode.flags |= flags;
731
732 if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
733 dotclock = pipe_config->port_clock * 2 / 3;
734 else
735 dotclock = pipe_config->port_clock;
736
737 if (HAS_PCH_SPLIT(dev_priv->dev))
738 ironlake_check_encoder_dotclock(pipe_config, dotclock);
739
740 pipe_config->adjusted_mode.crtc_clock = dotclock;
730} 741}
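
The 12bpc case reflects the 3:2 link ratio: with 36 bits per pixel instead
of 24, each pixel spends 1.5 times as many port clocks on the wire, so
(illustrative numbers) a port_clock of 337500 carrying 12bpc yields
crtc_clock = 337500 * 2 / 3 = 225000.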
731 742
732static void intel_enable_hdmi(struct intel_encoder *encoder) 743static void intel_enable_hdmi(struct intel_encoder *encoder)
@@ -836,7 +847,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
836 847
837 if (IS_G4X(dev)) 848 if (IS_G4X(dev))
838 return 165000; 849 return 165000;
839 else if (IS_HASWELL(dev)) 850 else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
840 return 300000; 851 return 300000;
841 else 852 else
842 return 225000; 853 return 225000;
@@ -862,7 +873,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
862 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 873 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
863 struct drm_device *dev = encoder->base.dev; 874 struct drm_device *dev = encoder->base.dev;
864 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 875 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
865 int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2; 876 int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
866 int portclock_limit = hdmi_portclock_limit(intel_hdmi); 877 int portclock_limit = hdmi_portclock_limit(intel_hdmi);
867 int desired_bpp; 878 int desired_bpp;
868 879
@@ -904,7 +915,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
904 pipe_config->pipe_bpp = desired_bpp; 915 pipe_config->pipe_bpp = desired_bpp;
905 } 916 }
906 917
907 if (adjusted_mode->clock > portclock_limit) { 918 if (adjusted_mode->crtc_clock > portclock_limit) {
908 DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n"); 919 DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
909 return false; 920 return false;
910 } 921 }
@@ -1063,7 +1074,7 @@ done:
1063 return 0; 1074 return 0;
1064} 1075}
1065 1076
1066static void intel_hdmi_pre_enable(struct intel_encoder *encoder) 1077static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
1067{ 1078{
1068 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1079 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1069 struct drm_device *dev = encoder->base.dev; 1080 struct drm_device *dev = encoder->base.dev;
@@ -1079,35 +1090,35 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
1079 1090
1080 /* Enable clock channels for this port */ 1091 /* Enable clock channels for this port */
1081 mutex_lock(&dev_priv->dpio_lock); 1092 mutex_lock(&dev_priv->dpio_lock);
1082 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); 1093 val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
1083 val = 0; 1094 val = 0;
1084 if (pipe) 1095 if (pipe)
1085 val |= (1<<21); 1096 val |= (1<<21);
1086 else 1097 else
1087 val &= ~(1<<21); 1098 val &= ~(1<<21);
1088 val |= 0x001000c4; 1099 val |= 0x001000c4;
1089 vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val); 1100 vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
1090 1101
1091 /* HDMI 1.0V-2dB */ 1102 /* HDMI 1.0V-2dB */
1092 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0); 1103 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0);
1093 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), 1104 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port),
1094 0x2b245f5f); 1105 0x2b245f5f);
1095 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port), 1106 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
1096 0x5578b83a); 1107 0x5578b83a);
1097 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 1108 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port),
1098 0x0c782040); 1109 0x0c782040);
1099 vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port), 1110 vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port),
1100 0x2b247878); 1111 0x2b247878);
1101 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000); 1112 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
1102 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), 1113 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
1103 0x00002000); 1114 0x00002000);
1104 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 1115 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
1105 DPIO_TX_OCALINIT_EN); 1116 DPIO_TX_OCALINIT_EN);
1106 1117
1107 /* Program lane clock */ 1118 /* Program lane clock */
1108 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 1119 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port),
1109 0x00760018); 1120 0x00760018);
1110 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 1121 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port),
1111 0x00400888); 1122 0x00400888);
1112 mutex_unlock(&dev_priv->dpio_lock); 1123 mutex_unlock(&dev_priv->dpio_lock);
1113 1124
@@ -1116,55 +1127,60 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
1116 vlv_wait_port_ready(dev_priv, port); 1127 vlv_wait_port_ready(dev_priv, port);
1117} 1128}
1118 1129
1119static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder) 1130static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1120{ 1131{
1121 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1132 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1122 struct drm_device *dev = encoder->base.dev; 1133 struct drm_device *dev = encoder->base.dev;
1123 struct drm_i915_private *dev_priv = dev->dev_private; 1134 struct drm_i915_private *dev_priv = dev->dev_private;
1135 struct intel_crtc *intel_crtc =
1136 to_intel_crtc(encoder->base.crtc);
1124 int port = vlv_dport_to_channel(dport); 1137 int port = vlv_dport_to_channel(dport);
1138 int pipe = intel_crtc->pipe;
1125 1139
1126 if (!IS_VALLEYVIEW(dev)) 1140 if (!IS_VALLEYVIEW(dev))
1127 return; 1141 return;
1128 1142
1129 /* Program Tx lane resets to default */ 1143 /* Program Tx lane resets to default */
1130 mutex_lock(&dev_priv->dpio_lock); 1144 mutex_lock(&dev_priv->dpio_lock);
1131 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 1145 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
1132 DPIO_PCS_TX_LANE2_RESET | 1146 DPIO_PCS_TX_LANE2_RESET |
1133 DPIO_PCS_TX_LANE1_RESET); 1147 DPIO_PCS_TX_LANE1_RESET);
1134 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 1148 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
1135 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | 1149 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1136 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | 1150 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1137 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | 1151 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1138 DPIO_PCS_CLK_SOFT_RESET); 1152 DPIO_PCS_CLK_SOFT_RESET);
1139 1153
1140 /* Fix up inter-pair skew failure */ 1154 /* Fix up inter-pair skew failure */
1141 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00); 1155 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
1142 vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500); 1156 vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
1143 vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000); 1157 vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
1144 1158
1145 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), 1159 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
1146 0x00002000); 1160 0x00002000);
1147 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 1161 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
1148 DPIO_TX_OCALINIT_EN); 1162 DPIO_TX_OCALINIT_EN);
1149 mutex_unlock(&dev_priv->dpio_lock); 1163 mutex_unlock(&dev_priv->dpio_lock);
1150} 1164}
1151 1165
1152static void intel_hdmi_post_disable(struct intel_encoder *encoder) 1166static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
1153{ 1167{
1154 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1168 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1155 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 1169 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1170 struct intel_crtc *intel_crtc =
1171 to_intel_crtc(encoder->base.crtc);
1156 int port = vlv_dport_to_channel(dport); 1172 int port = vlv_dport_to_channel(dport);
1173 int pipe = intel_crtc->pipe;
1157 1174
1158 /* Reset lanes to avoid HDMI flicker (VLV w/a) */ 1175 /* Reset lanes to avoid HDMI flicker (VLV w/a) */
1159 mutex_lock(&dev_priv->dpio_lock); 1176 mutex_lock(&dev_priv->dpio_lock);
1160 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000); 1177 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000);
1161 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060); 1178 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060);
1162 mutex_unlock(&dev_priv->dpio_lock); 1179 mutex_unlock(&dev_priv->dpio_lock);
1163} 1180}
1164 1181
1165static void intel_hdmi_destroy(struct drm_connector *connector) 1182static void intel_hdmi_destroy(struct drm_connector *connector)
1166{ 1183{
1167 drm_sysfs_connector_remove(connector);
1168 drm_connector_cleanup(connector); 1184 drm_connector_cleanup(connector);
1169 kfree(connector); 1185 kfree(connector);
1170} 1186}
@@ -1211,6 +1227,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1211 1227
1212 connector->interlace_allowed = 1; 1228 connector->interlace_allowed = 1;
1213 connector->doublescan_allowed = 0; 1229 connector->doublescan_allowed = 0;
1230 connector->stereo_allowed = 1;
1214 1231
1215 switch (port) { 1232 switch (port) {
1216 case PORT_B: 1233 case PORT_B:
@@ -1275,11 +1292,11 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1275 struct intel_encoder *intel_encoder; 1292 struct intel_encoder *intel_encoder;
1276 struct intel_connector *intel_connector; 1293 struct intel_connector *intel_connector;
1277 1294
1278 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 1295 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
1279 if (!intel_dig_port) 1296 if (!intel_dig_port)
1280 return; 1297 return;
1281 1298
1282 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 1299 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
1283 if (!intel_connector) { 1300 if (!intel_connector) {
1284 kfree(intel_dig_port); 1301 kfree(intel_dig_port);
1285 return; 1302 return;
@@ -1296,10 +1313,10 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1296 intel_encoder->get_hw_state = intel_hdmi_get_hw_state; 1313 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
1297 intel_encoder->get_config = intel_hdmi_get_config; 1314 intel_encoder->get_config = intel_hdmi_get_config;
1298 if (IS_VALLEYVIEW(dev)) { 1315 if (IS_VALLEYVIEW(dev)) {
1299 intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable; 1316 intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
1300 intel_encoder->pre_enable = intel_hdmi_pre_enable; 1317 intel_encoder->pre_enable = vlv_hdmi_pre_enable;
1301 intel_encoder->enable = vlv_enable_hdmi; 1318 intel_encoder->enable = vlv_enable_hdmi;
1302 intel_encoder->post_disable = intel_hdmi_post_disable; 1319 intel_encoder->post_disable = vlv_hdmi_post_disable;
1303 } else { 1320 } else {
1304 intel_encoder->enable = intel_enable_hdmi; 1321 intel_encoder->enable = intel_enable_hdmi;
1305 } 1322 }
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d1c1e0f7f262..2ca17b14b6c1 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,6 +34,11 @@
34#include <drm/i915_drm.h> 34#include <drm/i915_drm.h>
35#include "i915_drv.h" 35#include "i915_drv.h"
36 36
37enum disp_clk {
38 CDCLK,
39 CZCLK
40};
41
37struct gmbus_port { 42struct gmbus_port {
38 const char *name; 43 const char *name;
39 int reg; 44 int reg;
@@ -58,10 +63,69 @@ to_intel_gmbus(struct i2c_adapter *i2c)
58 return container_of(i2c, struct intel_gmbus, adapter); 63 return container_of(i2c, struct intel_gmbus, adapter);
59} 64}
60 65
66static int get_disp_clk_div(struct drm_i915_private *dev_priv,
67 enum disp_clk clk)
68{
69 u32 reg_val;
70 int clk_ratio;
71
72 reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
73
74 if (clk == CDCLK)
75 clk_ratio =
76 ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
77 else
78 clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
79
80 return clk_ratio;
81}
82
83static void gmbus_set_freq(struct drm_i915_private *dev_priv)
84{
85	static const int vco_freq[] = { 800, 1600, 2000, 2400 };
86 int gmbus_freq = 0, cdclk_div, hpll_freq;
87
88 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
89
90 /* Skip setting the gmbus freq if BIOS has already programmed it */
91 if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
92 return;
93
94 /* Obtain SKU information */
95 mutex_lock(&dev_priv->dpio_lock);
96 hpll_freq =
97 vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
98 mutex_unlock(&dev_priv->dpio_lock);
99
100 /* Get the CDCLK divide ratio */
101 cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
102
103 /*
104 * Program the gmbus_freq based on the cdclk frequency.
105 * BSpec erroneously claims we should aim for 4MHz, but
106 * in fact 1MHz is the correct frequency.
107 */
108 if (cdclk_div)
109 gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
110
111 if (WARN_ON(gmbus_freq == 0))
112 return;
113
114 I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
115}
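
Tracing the arithmetic with illustrative values: an HPLL fuse reading of 0
selects vco_freq[0] = 800, and a CDCLK ratio field of 7 makes cdclk_div =
7 + 1 = 8, so gmbus_freq = (800 << 1) / 8 = 200 is what gets written to
GMBUSFREQ_VLV.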
116
61void 117void
62intel_i2c_reset(struct drm_device *dev) 118intel_i2c_reset(struct drm_device *dev)
63{ 119{
64 struct drm_i915_private *dev_priv = dev->dev_private; 120 struct drm_i915_private *dev_priv = dev->dev_private;
121
122 /*
123 * In BIOS-less system, program the correct gmbus frequency
124 * before reading edid.
125 */
126 if (IS_VALLEYVIEW(dev))
127 gmbus_set_freq(dev_priv);
128
65 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); 129 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
66 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); 130 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
67} 131}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b8af94a5be39..c3b4da7895ed 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -92,6 +92,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
92 struct drm_device *dev = encoder->base.dev; 92 struct drm_device *dev = encoder->base.dev;
93 struct drm_i915_private *dev_priv = dev->dev_private; 93 struct drm_i915_private *dev_priv = dev->dev_private;
94 u32 lvds_reg, tmp, flags = 0; 94 u32 lvds_reg, tmp, flags = 0;
95 int dotclock;
95 96
96 if (HAS_PCH_SPLIT(dev)) 97 if (HAS_PCH_SPLIT(dev))
97 lvds_reg = PCH_LVDS; 98 lvds_reg = PCH_LVDS;
@@ -116,6 +117,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
116 117
117 pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; 118 pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
118 } 119 }
120
121 dotclock = pipe_config->port_clock;
122
123 if (HAS_PCH_SPLIT(dev_priv->dev))
124 ironlake_check_encoder_dotclock(pipe_config, dotclock);
125
126 pipe_config->adjusted_mode.crtc_clock = dotclock;
119} 127}
120 128
121/* The LVDS pin pair needs to be on before the DPLLs are enabled. 129/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -198,7 +206,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
198{ 206{
199 struct drm_device *dev = encoder->base.dev; 207 struct drm_device *dev = encoder->base.dev;
200 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 208 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
201 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 209 struct intel_connector *intel_connector =
210 &lvds_encoder->attached_connector->base;
202 struct drm_i915_private *dev_priv = dev->dev_private; 211 struct drm_i915_private *dev_priv = dev->dev_private;
203 u32 ctl_reg, stat_reg; 212 u32 ctl_reg, stat_reg;
204 213
@@ -217,13 +226,15 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
217 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) 226 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
218 DRM_ERROR("timed out waiting for panel to power on\n"); 227 DRM_ERROR("timed out waiting for panel to power on\n");
219 228
220 intel_panel_enable_backlight(dev, intel_crtc->pipe); 229 intel_panel_enable_backlight(intel_connector);
221} 230}
222 231
223static void intel_disable_lvds(struct intel_encoder *encoder) 232static void intel_disable_lvds(struct intel_encoder *encoder)
224{ 233{
225 struct drm_device *dev = encoder->base.dev; 234 struct drm_device *dev = encoder->base.dev;
226 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 235 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
236 struct intel_connector *intel_connector =
237 &lvds_encoder->attached_connector->base;
227 struct drm_i915_private *dev_priv = dev->dev_private; 238 struct drm_i915_private *dev_priv = dev->dev_private;
228 u32 ctl_reg, stat_reg; 239 u32 ctl_reg, stat_reg;
229 240
@@ -235,7 +246,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
235 stat_reg = PP_STATUS; 246 stat_reg = PP_STATUS;
236 } 247 }
237 248
238 intel_panel_disable_backlight(dev); 249 intel_panel_disable_backlight(intel_connector);
239 250
240 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); 251 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
241 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) 252 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
@@ -466,7 +477,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
466 477
467 intel_panel_fini(&lvds_connector->base.panel); 478 intel_panel_fini(&lvds_connector->base.panel);
468 479
469 drm_sysfs_connector_remove(connector);
470 drm_connector_cleanup(connector); 480 drm_connector_cleanup(connector);
471 kfree(connector); 481 kfree(connector);
472} 482}
@@ -802,7 +812,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
802 return true; 812 return true;
803 813
804 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 814 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
805 struct child_device_config *child = dev_priv->vbt.child_dev + i; 815 union child_device_config *uchild = dev_priv->vbt.child_dev + i;
816 struct old_child_dev_config *child = &uchild->old;
806 817
807 /* If the device type is not LFP, continue. 818 /* If the device type is not LFP, continue.
808 * We have to check both the new identifiers as well as the 819 * We have to check both the new identifiers as well as the
@@ -956,11 +967,11 @@ void intel_lvds_init(struct drm_device *dev)
956 } 967 }
957 } 968 }
958 969
959 lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL); 970 lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
960 if (!lvds_encoder) 971 if (!lvds_encoder)
961 return; 972 return;
962 973
963 lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL); 974 lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
964 if (!lvds_connector) { 975 if (!lvds_connector) {
965 kfree(lvds_encoder); 976 kfree(lvds_encoder);
966 return; 977 return;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 119771ff46ab..1b2f41c3f191 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -36,8 +36,11 @@
36#include "i915_drv.h" 36#include "i915_drv.h"
37#include "intel_drv.h" 37#include "intel_drv.h"
38 38
39#define PCI_ASLE 0xe4 39#define PCI_ASLE 0xe4
40#define PCI_ASLS 0xfc 40#define PCI_ASLS 0xfc
41#define PCI_SWSCI 0xe8
42#define PCI_SWSCI_SCISEL (1 << 15)
43#define PCI_SWSCI_GSSCIE (1 << 0)
41 44
42#define OPREGION_HEADER_OFFSET 0 45#define OPREGION_HEADER_OFFSET 0
43#define OPREGION_ACPI_OFFSET 0x100 46#define OPREGION_ACPI_OFFSET 0x100
@@ -107,25 +110,38 @@ struct opregion_asle {
107 u32 epfm; /* enabled panel fitting modes */ 110 u32 epfm; /* enabled panel fitting modes */
108 u8 plut[74]; /* panel LUT and identifier */ 111 u8 plut[74]; /* panel LUT and identifier */
109 u32 pfmb; /* PWM freq and min brightness */ 112 u32 pfmb; /* PWM freq and min brightness */
110 u8 rsvd[102]; 113 u32 cddv; /* color correction default values */
114 u32 pcft; /* power conservation features */
115 u32 srot; /* supported rotation angles */
116 u32 iuer; /* IUER events */
117 u8 rsvd[86];
111} __attribute__((packed)); 118} __attribute__((packed));
112 119
113/* Driver readiness indicator */ 120/* Driver readiness indicator */
114#define ASLE_ARDY_READY (1 << 0) 121#define ASLE_ARDY_READY (1 << 0)
115#define ASLE_ARDY_NOT_READY (0 << 0) 122#define ASLE_ARDY_NOT_READY (0 << 0)
116 123
117/* ASLE irq request bits */ 124/* ASLE Interrupt Command (ASLC) bits */
118#define ASLE_SET_ALS_ILLUM (1 << 0) 125#define ASLC_SET_ALS_ILLUM (1 << 0)
119#define ASLE_SET_BACKLIGHT (1 << 1) 126#define ASLC_SET_BACKLIGHT (1 << 1)
120#define ASLE_SET_PFIT (1 << 2) 127#define ASLC_SET_PFIT (1 << 2)
121#define ASLE_SET_PWM_FREQ (1 << 3) 128#define ASLC_SET_PWM_FREQ (1 << 3)
122#define ASLE_REQ_MSK 0xf 129#define ASLC_SUPPORTED_ROTATION_ANGLES (1 << 4)
123 130#define ASLC_BUTTON_ARRAY (1 << 5)
124/* response bits of ASLE irq request */ 131#define ASLC_CONVERTIBLE_INDICATOR (1 << 6)
125#define ASLE_ALS_ILLUM_FAILED (1<<10) 132#define ASLC_DOCKING_INDICATOR (1 << 7)
126#define ASLE_BACKLIGHT_FAILED (1<<12) 133#define ASLC_ISCT_STATE_CHANGE (1 << 8)
127#define ASLE_PFIT_FAILED (1<<14) 134#define ASLC_REQ_MSK 0x1ff
128#define ASLE_PWM_FREQ_FAILED (1<<16) 135/* response bits */
136#define ASLC_ALS_ILLUM_FAILED (1 << 10)
137#define ASLC_BACKLIGHT_FAILED (1 << 12)
138#define ASLC_PFIT_FAILED (1 << 14)
139#define ASLC_PWM_FREQ_FAILED (1 << 16)
140#define ASLC_ROTATION_ANGLES_FAILED (1 << 18)
141#define ASLC_BUTTON_ARRAY_FAILED (1 << 20)
142#define ASLC_CONVERTIBLE_FAILED (1 << 22)
143#define ASLC_DOCKING_FAILED (1 << 24)
144#define ASLC_ISCT_STATE_FAILED (1 << 26)
129 145
130/* Technology enabled indicator */ 146/* Technology enabled indicator */
131#define ASLE_TCHE_ALS_EN (1 << 0) 147#define ASLE_TCHE_ALS_EN (1 << 0)
@@ -151,6 +167,60 @@ struct opregion_asle {
151 167
152#define ASLE_CBLV_VALID (1<<31) 168#define ASLE_CBLV_VALID (1<<31)
153 169
170/* IUER */
171#define ASLE_IUER_DOCKING (1 << 7)
172#define ASLE_IUER_CONVERTIBLE (1 << 6)
173#define ASLE_IUER_ROTATION_LOCK_BTN (1 << 4)
174#define ASLE_IUER_VOLUME_DOWN_BTN (1 << 3)
175#define ASLE_IUER_VOLUME_UP_BTN (1 << 2)
176#define ASLE_IUER_WINDOWS_BTN (1 << 1)
177#define ASLE_IUER_POWER_BTN (1 << 0)
178
179/* Software System Control Interrupt (SWSCI) */
180#define SWSCI_SCIC_INDICATOR (1 << 0)
181#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1
182#define SWSCI_SCIC_MAIN_FUNCTION_MASK (0xf << 1)
183#define SWSCI_SCIC_SUB_FUNCTION_SHIFT 8
184#define SWSCI_SCIC_SUB_FUNCTION_MASK (0xff << 8)
185#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT 8
186#define SWSCI_SCIC_EXIT_PARAMETER_MASK (0xff << 8)
187#define SWSCI_SCIC_EXIT_STATUS_SHIFT 5
188#define SWSCI_SCIC_EXIT_STATUS_MASK (7 << 5)
189#define SWSCI_SCIC_EXIT_STATUS_SUCCESS 1
190
191#define SWSCI_FUNCTION_CODE(main, sub) \
192 ((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
193 (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
194
195/* SWSCI: Get BIOS Data (GBDA) */
196#define SWSCI_GBDA 4
197#define SWSCI_GBDA_SUPPORTED_CALLS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
198#define SWSCI_GBDA_REQUESTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
199#define SWSCI_GBDA_BOOT_DISPLAY_PREF SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
200#define SWSCI_GBDA_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
201#define SWSCI_GBDA_TV_STANDARD SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
202#define SWSCI_GBDA_INTERNAL_GRAPHICS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
203#define SWSCI_GBDA_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
204
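As a quick illustration of the SWSCI_FUNCTION_CODE() packing above, this standalone sketch (not part of the patch) recomputes SWSCI_GBDA_TV_STANDARD by hand; the shift values mirror SWSCI_SCIC_MAIN_FUNCTION_SHIFT and SWSCI_SCIC_SUB_FUNCTION_SHIFT.

#include <stdio.h>

#define MAIN_SHIFT 1	/* SWSCI_SCIC_MAIN_FUNCTION_SHIFT */
#define SUB_SHIFT  8	/* SWSCI_SCIC_SUB_FUNCTION_SHIFT */

int main(void)
{
	/* GBDA (main function 4), TV standard (sub-function 6) */
	unsigned int code = (4u << MAIN_SHIFT) | (6u << SUB_SHIFT);

	printf("SWSCI_GBDA_TV_STANDARD = 0x%08x\n", code); /* 0x00000608 */
	return 0;
}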
205/* SWSCI: System BIOS Callbacks (SBCB) */
206#define SWSCI_SBCB 6
207#define SWSCI_SBCB_SUPPORTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
208#define SWSCI_SBCB_INIT_COMPLETION SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
209#define SWSCI_SBCB_PRE_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
210#define SWSCI_SBCB_POST_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
211#define SWSCI_SBCB_DISPLAY_SWITCH SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
212#define SWSCI_SBCB_SET_TV_FORMAT SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
213#define SWSCI_SBCB_ADAPTER_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
214#define SWSCI_SBCB_DISPLAY_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
215#define SWSCI_SBCB_SET_BOOT_DISPLAY SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
216#define SWSCI_SBCB_SET_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
217#define SWSCI_SBCB_SET_INTERNAL_GFX SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
218#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
219#define SWSCI_SBCB_SUSPEND_RESUME SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
220#define SWSCI_SBCB_SET_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
221#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
222#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
223
154#define ACPI_OTHER_OUTPUT (0<<8) 224#define ACPI_OTHER_OUTPUT (0<<8)
155#define ACPI_VGA_OUTPUT (1<<8) 225#define ACPI_VGA_OUTPUT (1<<8)
156#define ACPI_TV_OUTPUT (2<<8) 226#define ACPI_TV_OUTPUT (2<<8)
@@ -158,24 +228,224 @@ struct opregion_asle {
158#define ACPI_LVDS_OUTPUT (4<<8) 228#define ACPI_LVDS_OUTPUT (4<<8)
159 229
160#ifdef CONFIG_ACPI 230#ifdef CONFIG_ACPI
231static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
232{
233 struct drm_i915_private *dev_priv = dev->dev_private;
234 struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci;
235 u32 main_function, sub_function, scic;
236 u16 pci_swsci;
237 u32 dslp;
238
239 if (!swsci)
240 return -ENODEV;
241
242 main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
243 SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
244 sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
245 SWSCI_SCIC_SUB_FUNCTION_SHIFT;
246
247 /* Check if we can call the function. See swsci_setup for details. */
248 if (main_function == SWSCI_SBCB) {
249 if ((dev_priv->opregion.swsci_sbcb_sub_functions &
250 (1 << sub_function)) == 0)
251 return -EINVAL;
252 } else if (main_function == SWSCI_GBDA) {
253 if ((dev_priv->opregion.swsci_gbda_sub_functions &
254 (1 << sub_function)) == 0)
255 return -EINVAL;
256 }
257
258 /* Driver sleep timeout in ms. */
259 dslp = ioread32(&swsci->dslp);
260 if (!dslp) {
261 /* The spec says 2ms should be the default, but it's too small
262 * for some machines. */
263 dslp = 50;
264 } else if (dslp > 500) {
265 /* Hey bios, trust must be earned. */
266 WARN_ONCE(1, "excessive driver sleep timeout (DSLP) %u\n", dslp);
267 dslp = 500;
268 }
269
270 /* The spec tells us to do this, but we are the only user... */
271 scic = ioread32(&swsci->scic);
272 if (scic & SWSCI_SCIC_INDICATOR) {
273 DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
274 return -EBUSY;
275 }
276
277 scic = function | SWSCI_SCIC_INDICATOR;
278
279 iowrite32(parm, &swsci->parm);
280 iowrite32(scic, &swsci->scic);
281
282 /* Ensure SCI event is selected and event trigger is cleared. */
283 pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
284 if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
285 pci_swsci |= PCI_SWSCI_SCISEL;
286 pci_swsci &= ~PCI_SWSCI_GSSCIE;
287 pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
288 }
289
290 /* Use event trigger to tell bios to check the mail. */
291 pci_swsci |= PCI_SWSCI_GSSCIE;
292 pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
293
294 /* Poll for the result. */
295#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0)
296 if (wait_for(C, dslp)) {
297 DRM_DEBUG_DRIVER("SWSCI request timed out\n");
298 return -ETIMEDOUT;
299 }
300
301 scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
302 SWSCI_SCIC_EXIT_STATUS_SHIFT;
303
304 /* Note: scic == 0 is an error! */
305 if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
306 DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
307 return -EIO;
308 }
309
310 if (parm_out)
311 *parm_out = ioread32(&swsci->parm);
312
313 return 0;
314
315#undef C
316}
317
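A minimal sketch of how a caller might use the swsci() helper defined above, here querying panel details via GBDA sub-function 5. The function name is hypothetical and error handling is elided; only calls visible in this patch are used.

static void example_query_panel_details(struct drm_device *dev)
{
	u32 panel = 0;

	/* swsci() returns 0 on success, negative errno otherwise */
	if (swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0, &panel) == 0)
		DRM_DEBUG_DRIVER("panel details: 0x%08x\n", panel);
}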
318#define DISPLAY_TYPE_CRT 0
319#define DISPLAY_TYPE_TV 1
320#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL 2
321#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL 3
322
323int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
324 bool enable)
325{
326 struct drm_device *dev = intel_encoder->base.dev;
327 u32 parm = 0;
328 u32 type = 0;
329 u32 port;
330
331 /* don't care about old stuff for now */
332 if (!HAS_DDI(dev))
333 return 0;
334
335 port = intel_ddi_get_encoder_port(intel_encoder);
336 if (port == PORT_E) {
337 port = 0;
338 } else {
339 parm |= 1 << port;
340 port++;
341 }
342
343 if (!enable)
344 parm |= 4 << 8;
345
346 switch (intel_encoder->type) {
347 case INTEL_OUTPUT_ANALOG:
348 type = DISPLAY_TYPE_CRT;
349 break;
350 case INTEL_OUTPUT_UNKNOWN:
351 case INTEL_OUTPUT_DISPLAYPORT:
352 case INTEL_OUTPUT_HDMI:
353 type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
354 break;
355 case INTEL_OUTPUT_EDP:
356 type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
357 break;
358 default:
359 WARN_ONCE(1, "unsupported intel_encoder type %d\n",
360 intel_encoder->type);
361 return -EINVAL;
362 }
363
364 parm |= type << (16 + port * 3);
365
366 return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
367}
368
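Worked example of the parm encoding built above, as a standalone sketch: enabling an HDMI encoder on port B, assuming PORT_A..PORT_E enumerate as 0..4. The low bits carry a per-port mask, a display-power value sits at bit 8 (4 = disable), and a 3-bit display type sits at bit 16 + port * 3.

#include <stdio.h>

int main(void)
{
	unsigned int parm = 0;
	unsigned int port = 1;	/* PORT_B */
	unsigned int type = 2;	/* DISPLAY_TYPE_EXTERNAL_FLAT_PANEL */

	parm |= 1u << port;		/* port mask bit for port B */
	port++;				/* non-PORT_E ports bump the field index */
	parm |= type << (16 + port * 3);

	printf("parm = 0x%08x\n", parm); /* 0x00800002 */
	return 0;
}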
369static const struct {
370 pci_power_t pci_power_state;
371 u32 parm;
372} power_state_map[] = {
373 { PCI_D0, 0x00 },
374 { PCI_D1, 0x01 },
375 { PCI_D2, 0x02 },
376 { PCI_D3hot, 0x04 },
377 { PCI_D3cold, 0x04 },
378};
379
380int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
381{
382 int i;
383
384 if (!HAS_DDI(dev))
385 return 0;
386
387 for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
388 if (state == power_state_map[i].pci_power_state)
389 return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
390 power_state_map[i].parm, NULL);
391 }
392
393 return -EINVAL;
394}
395
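A sketch of a caller for the mapping table above: a hypothetical suspend path telling the BIOS the adapter is headed to D3hot, which the table translates to parm 0x04. Not part of the patch; a real caller would act on the error code.

static void example_notify_d3(struct drm_device *dev)
{
	if (intel_opregion_notify_adapter(dev, PCI_D3hot))
		DRM_DEBUG_DRIVER("failed to notify bios of D3 transition\n");
}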
161static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 396static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
162{ 397{
163 struct drm_i915_private *dev_priv = dev->dev_private; 398 struct drm_i915_private *dev_priv = dev->dev_private;
399 struct drm_encoder *encoder;
400 struct drm_connector *connector;
401 struct intel_connector *intel_connector = NULL;
402 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
164 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 403 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
404 u32 ret = 0;
405 bool found = false;
165 406
166 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); 407 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
167 408
168 if (!(bclp & ASLE_BCLP_VALID)) 409 if (!(bclp & ASLE_BCLP_VALID))
169 return ASLE_BACKLIGHT_FAILED; 410 return ASLC_BACKLIGHT_FAILED;
170 411
171 bclp &= ASLE_BCLP_MSK; 412 bclp &= ASLE_BCLP_MSK;
172 if (bclp > 255) 413 if (bclp > 255)
173 return ASLE_BACKLIGHT_FAILED; 414 return ASLC_BACKLIGHT_FAILED;
415
416 mutex_lock(&dev->mode_config.mutex);
417 /*
418 * Could match the OpRegion connector here instead, but we'd also need
419 * to verify the connector could handle a backlight call.
420 */
421 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
422 if (encoder->crtc == crtc) {
423 found = true;
424 break;
425 }
426
427 if (!found) {
428 ret = ASLC_BACKLIGHT_FAILED;
429 goto out;
430 }
431
432 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
433 if (connector->encoder == encoder)
434 intel_connector = to_intel_connector(connector);
435
436 if (!intel_connector) {
437 ret = ASLC_BACKLIGHT_FAILED;
438 goto out;
439 }
174 440
175 intel_panel_set_backlight(dev, bclp, 255); 441 DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
442 intel_panel_set_backlight(intel_connector, bclp, 255);
176 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); 443 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
177 444
178 return 0; 445out:
446 mutex_unlock(&dev->mode_config.mutex);
447
448 return ret;
179} 449}
180 450
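Worked example of the cblv write-back above, as a standalone sketch: the 0-255 bclp request is echoed to the BIOS as a 0-100 percentage with the valid bit set, so bclp = 128 rounds up to 51% and yields 0x80000033.

#include <stdio.h>

#define CBLV_VALID (1u << 31)	/* ASLE_CBLV_VALID */

int main(void)
{
	unsigned int bclp = 128;
	/* open-coded DIV_ROUND_UP(bclp * 100, 255) */
	unsigned int cblv = ((bclp * 100 + 254) / 255) | CBLV_VALID;

	printf("cblv = 0x%08x\n", cblv); /* 0x80000033 */
	return 0;
}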
181static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) 451static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
@@ -183,13 +453,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
183 /* alsi is the current ALS reading in lux. 0 indicates below sensor 453 /* alsi is the current ALS reading in lux. 0 indicates below sensor
184 range, 0xffff indicates above sensor range. 1-0xfffe are valid */ 454 range, 0xffff indicates above sensor range. 1-0xfffe are valid */
185 DRM_DEBUG_DRIVER("Illum is not supported\n"); 455 DRM_DEBUG_DRIVER("Illum is not supported\n");
186 return ASLE_ALS_ILLUM_FAILED; 456 return ASLC_ALS_ILLUM_FAILED;
187} 457}
188 458
189static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) 459static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
190{ 460{
191 DRM_DEBUG_DRIVER("PWM freq is not supported\n"); 461 DRM_DEBUG_DRIVER("PWM freq is not supported\n");
192 return ASLE_PWM_FREQ_FAILED; 462 return ASLC_PWM_FREQ_FAILED;
193} 463}
194 464
195static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) 465static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
@@ -197,39 +467,118 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
197 /* Panel fitting is currently controlled by the X code, so this is a 467 /* Panel fitting is currently controlled by the X code, so this is a
198 noop until modesetting support works fully */ 468 noop until modesetting support works fully */
199 DRM_DEBUG_DRIVER("Pfit is not supported\n"); 469 DRM_DEBUG_DRIVER("Pfit is not supported\n");
200 return ASLE_PFIT_FAILED; 470 return ASLC_PFIT_FAILED;
201} 471}
202 472
203void intel_opregion_asle_intr(struct drm_device *dev) 473static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
204{ 474{
205 struct drm_i915_private *dev_priv = dev->dev_private; 475 DRM_DEBUG_DRIVER("SROT is not supported\n");
476 return ASLC_ROTATION_ANGLES_FAILED;
477}
478
479static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
480{
481 if (!iuer)
482 DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
483 if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
484 DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
485 if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
486 DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
487 if (iuer & ASLE_IUER_VOLUME_UP_BTN)
488 DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
489 if (iuer & ASLE_IUER_WINDOWS_BTN)
490 DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
491 if (iuer & ASLE_IUER_POWER_BTN)
492 DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
493
494 return ASLC_BUTTON_ARRAY_FAILED;
495}
496
497static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
498{
499 if (iuer & ASLE_IUER_CONVERTIBLE)
500 DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
501 else
502 DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
503
504 return ASLC_CONVERTIBLE_FAILED;
505}
506
507static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
508{
509 if (iuer & ASLE_IUER_DOCKING)
510 DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
511 else
512 DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
513
514 return ASLC_DOCKING_FAILED;
515}
516
517static u32 asle_isct_state(struct drm_device *dev)
518{
519 DRM_DEBUG_DRIVER("ISCT is not supported\n");
520 return ASLC_ISCT_STATE_FAILED;
521}
522
523static void asle_work(struct work_struct *work)
524{
525 struct intel_opregion *opregion =
526 container_of(work, struct intel_opregion, asle_work);
527 struct drm_i915_private *dev_priv =
528 container_of(opregion, struct drm_i915_private, opregion);
529 struct drm_device *dev = dev_priv->dev;
206 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 530 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
207 u32 asle_stat = 0; 531 u32 aslc_stat = 0;
208 u32 asle_req; 532 u32 aslc_req;
209 533
210 if (!asle) 534 if (!asle)
211 return; 535 return;
212 536
213 asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK; 537 aslc_req = ioread32(&asle->aslc);
214 538
215 if (!asle_req) { 539 if (!(aslc_req & ASLC_REQ_MSK)) {
216 DRM_DEBUG_DRIVER("non asle set request??\n"); 540 DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
541 aslc_req);
217 return; 542 return;
218 } 543 }
219 544
220 if (asle_req & ASLE_SET_ALS_ILLUM) 545 if (aslc_req & ASLC_SET_ALS_ILLUM)
221 asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi)); 546 aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
547
548 if (aslc_req & ASLC_SET_BACKLIGHT)
549 aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
550
551 if (aslc_req & ASLC_SET_PFIT)
552 aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
553
554 if (aslc_req & ASLC_SET_PWM_FREQ)
555 aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
222 556
223 if (asle_req & ASLE_SET_BACKLIGHT) 557 if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
224 asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp)); 558 aslc_stat |= asle_set_supported_rotation_angles(dev,
559 ioread32(&asle->srot));
225 560
226 if (asle_req & ASLE_SET_PFIT) 561 if (aslc_req & ASLC_BUTTON_ARRAY)
227 asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit)); 562 aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer));
228 563
229 if (asle_req & ASLE_SET_PWM_FREQ) 564 if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
230 asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb)); 565 aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer));
231 566
232 iowrite32(asle_stat, &asle->aslc); 567 if (aslc_req & ASLC_DOCKING_INDICATOR)
568 aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer));
569
570 if (aslc_req & ASLC_ISCT_STATE_CHANGE)
571 aslc_stat |= asle_isct_state(dev);
572
573 iowrite32(aslc_stat, &asle->aslc);
574}
575
576void intel_opregion_asle_intr(struct drm_device *dev)
577{
578 struct drm_i915_private *dev_priv = dev->dev_private;
579
580 if (dev_priv->opregion.asle)
581 schedule_work(&dev_priv->opregion.asle_work);
233} 582}
234 583
235#define ACPI_EV_DISPLAY_SWITCH (1<<0) 584#define ACPI_EV_DISPLAY_SWITCH (1<<0)
@@ -432,6 +781,8 @@ void intel_opregion_fini(struct drm_device *dev)
432 if (opregion->asle) 781 if (opregion->asle)
433 iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy); 782 iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
434 783
784 cancel_work_sync(&dev_priv->opregion.asle_work);
785
435 if (opregion->acpi) { 786 if (opregion->acpi) {
436 iowrite32(0, &opregion->acpi->drdy); 787 iowrite32(0, &opregion->acpi->drdy);
437 788
@@ -446,8 +797,68 @@ void intel_opregion_fini(struct drm_device *dev)
446 opregion->swsci = NULL; 797 opregion->swsci = NULL;
447 opregion->asle = NULL; 798 opregion->asle = NULL;
448 opregion->vbt = NULL; 799 opregion->vbt = NULL;
800 opregion->lid_state = NULL;
449} 801}
450#endif 802
803static void swsci_setup(struct drm_device *dev)
804{
805 struct drm_i915_private *dev_priv = dev->dev_private;
806 struct intel_opregion *opregion = &dev_priv->opregion;
807 bool requested_callbacks = false;
808 u32 tmp;
809
810 /* Sub-function code 0 is okay, let's allow them. */
811 opregion->swsci_gbda_sub_functions = 1;
812 opregion->swsci_sbcb_sub_functions = 1;
813
814 /* We use GBDA to ask for supported GBDA calls. */
815 if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
816 /* make the bits match the sub-function codes */
817 tmp <<= 1;
818 opregion->swsci_gbda_sub_functions |= tmp;
819 }
820
821 /*
822 * We also use GBDA to ask for _requested_ SBCB callbacks. The driver
823 * must not call interfaces that are not specifically requested by the
824 * bios.
825 */
826 if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
827 /* here, the bits already match sub-function codes */
828 opregion->swsci_sbcb_sub_functions |= tmp;
829 requested_callbacks = true;
830 }
831
832 /*
833 * But we use SBCB to ask for _supported_ SBCB calls. This does not mean
834 * the callback is _requested_. But we still can't call interfaces that
835 * are not requested.
836 */
837 if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
838 /* make the bits match the sub-function codes */
839 u32 low = tmp & 0x7ff;
840 u32 high = tmp & ~0xfff; /* bit 11 is reserved */
841 tmp = (high << 4) | (low << 1) | 1;
842
843 /* best guess what to do with supported wrt requested */
844 if (requested_callbacks) {
845 u32 req = opregion->swsci_sbcb_sub_functions;
846 if ((req & tmp) != req)
847 DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
848 /* XXX: for now, trust the requested callbacks */
849 /* opregion->swsci_sbcb_sub_functions &= tmp; */
850 } else {
851 opregion->swsci_sbcb_sub_functions |= tmp;
852 }
853 }
854
855 DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
856 opregion->swsci_gbda_sub_functions,
857 opregion->swsci_sbcb_sub_functions);
858}
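Worked example (standalone sketch, not part of the patch) of the supported-SBCB remap in swsci_setup() above: because bit 11 of the BIOS reply is reserved, reply bits 0-10 shift up to sub-functions 1-11 and bits 12 and up shift to sub-functions 16 and up, with bit 0 forced on for sub-function 0.

#include <stdio.h>

int main(void)
{
	unsigned int tmp = (1u << 0) | (1u << 12);	/* sample BIOS reply */
	unsigned int low = tmp & 0x7ff;
	unsigned int high = tmp & ~0xfffu;		/* bit 11 is reserved */
	unsigned int subs = (high << 4) | (low << 1) | 1;

	/* bits 0, 1 and 16 set: sub-functions 0, 1 and 16 callable */
	printf("sub-functions = 0x%08x\n", subs); /* 0x00010003 */
	return 0;
}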
859#else /* CONFIG_ACPI */
860static inline void swsci_setup(struct drm_device *dev) {}
861#endif /* CONFIG_ACPI */
451 862
452int intel_opregion_setup(struct drm_device *dev) 863int intel_opregion_setup(struct drm_device *dev)
453{ 864{
@@ -465,6 +876,10 @@ int intel_opregion_setup(struct drm_device *dev)
465 return -ENOTSUPP; 876 return -ENOTSUPP;
466 } 877 }
467 878
879#ifdef CONFIG_ACPI
880 INIT_WORK(&opregion->asle_work, asle_work);
881#endif
882
468 base = acpi_os_ioremap(asls, OPREGION_SIZE); 883 base = acpi_os_ioremap(asls, OPREGION_SIZE);
469 if (!base) 884 if (!base)
470 return -ENOMEM; 885 return -ENOMEM;
@@ -490,6 +905,7 @@ int intel_opregion_setup(struct drm_device *dev)
490 if (mboxes & MBOX_SWSCI) { 905 if (mboxes & MBOX_SWSCI) {
491 DRM_DEBUG_DRIVER("SWSCI supported\n"); 906 DRM_DEBUG_DRIVER("SWSCI supported\n");
492 opregion->swsci = base + OPREGION_SWSCI_OFFSET; 907 opregion->swsci = base + OPREGION_SWSCI_OFFSET;
908 swsci_setup(dev);
493 } 909 }
494 if (mboxes & MBOX_ASLE) { 910 if (mboxes & MBOX_ASLE) {
495 DRM_DEBUG_DRIVER("ASLE supported\n"); 911 DRM_DEBUG_DRIVER("ASLE supported\n");
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index ddfd0aefe0c0..a98a990fbab3 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -821,14 +821,11 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
821static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, 821static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
822 struct intel_crtc *crtc) 822 struct intel_crtc *crtc)
823{ 823{
824 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
825
826 if (!crtc->active) 824 if (!crtc->active)
827 return -EINVAL; 825 return -EINVAL;
828 826
829 /* can't use the overlay with double wide pipe */ 827 /* can't use the overlay with double wide pipe */
830 if (INTEL_INFO(overlay->dev)->gen < 4 && 828 if (crtc->config.double_wide)
831 (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
832 return -EINVAL; 829 return -EINVAL;
833 830
834 return 0; 831 return 0;
@@ -1056,7 +1053,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1056 return ret; 1053 return ret;
1057 } 1054 }
1058 1055
1059 params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL); 1056 params = kmalloc(sizeof(*params), GFP_KERNEL);
1060 if (!params) 1057 if (!params)
1061 return -ENOMEM; 1058 return -ENOMEM;
1062 1059
@@ -1323,7 +1320,7 @@ void intel_setup_overlay(struct drm_device *dev)
1323 if (!HAS_OVERLAY(dev)) 1320 if (!HAS_OVERLAY(dev))
1324 return; 1321 return;
1325 1322
1326 overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL); 1323 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
1327 if (!overlay) 1324 if (!overlay)
1328 return; 1325 return;
1329 1326
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 293564a2896a..f161ac02c4f6 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -50,23 +50,22 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
50 struct intel_crtc_config *pipe_config, 50 struct intel_crtc_config *pipe_config,
51 int fitting_mode) 51 int fitting_mode)
52{ 52{
53 struct drm_display_mode *mode, *adjusted_mode; 53 struct drm_display_mode *adjusted_mode;
54 int x, y, width, height; 54 int x, y, width, height;
55 55
56 mode = &pipe_config->requested_mode;
57 adjusted_mode = &pipe_config->adjusted_mode; 56 adjusted_mode = &pipe_config->adjusted_mode;
58 57
59 x = y = width = height = 0; 58 x = y = width = height = 0;
60 59
61 /* Native modes don't need fitting */ 60 /* Native modes don't need fitting */
62 if (adjusted_mode->hdisplay == mode->hdisplay && 61 if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
63 adjusted_mode->vdisplay == mode->vdisplay) 62 adjusted_mode->vdisplay == pipe_config->pipe_src_h)
64 goto done; 63 goto done;
65 64
66 switch (fitting_mode) { 65 switch (fitting_mode) {
67 case DRM_MODE_SCALE_CENTER: 66 case DRM_MODE_SCALE_CENTER:
68 width = mode->hdisplay; 67 width = pipe_config->pipe_src_w;
69 height = mode->vdisplay; 68 height = pipe_config->pipe_src_h;
70 x = (adjusted_mode->hdisplay - width + 1)/2; 69 x = (adjusted_mode->hdisplay - width + 1)/2;
71 y = (adjusted_mode->vdisplay - height + 1)/2; 70 y = (adjusted_mode->vdisplay - height + 1)/2;
72 break; 71 break;
@@ -74,17 +73,19 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
74 case DRM_MODE_SCALE_ASPECT: 73 case DRM_MODE_SCALE_ASPECT:
75 /* Scale but preserve the aspect ratio */ 74 /* Scale but preserve the aspect ratio */
76 { 75 {
77 u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; 76 u32 scaled_width = adjusted_mode->hdisplay
78 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; 77 * pipe_config->pipe_src_h;
78 u32 scaled_height = pipe_config->pipe_src_w
79 * adjusted_mode->vdisplay;
79 if (scaled_width > scaled_height) { /* pillar */ 80 if (scaled_width > scaled_height) { /* pillar */
80 width = scaled_height / mode->vdisplay; 81 width = scaled_height / pipe_config->pipe_src_h;
81 if (width & 1) 82 if (width & 1)
82 width++; 83 width++;
83 x = (adjusted_mode->hdisplay - width + 1) / 2; 84 x = (adjusted_mode->hdisplay - width + 1) / 2;
84 y = 0; 85 y = 0;
85 height = adjusted_mode->vdisplay; 86 height = adjusted_mode->vdisplay;
86 } else if (scaled_width < scaled_height) { /* letter */ 87 } else if (scaled_width < scaled_height) { /* letter */
87 height = scaled_width / mode->hdisplay; 88 height = scaled_width / pipe_config->pipe_src_w;
88 if (height & 1) 89 if (height & 1)
89 height++; 90 height++;
90 y = (adjusted_mode->vdisplay - height + 1) / 2; 91 y = (adjusted_mode->vdisplay - height + 1) / 2;
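The pillar/letter decision above cross-multiplies the two aspect ratios instead of dividing them; a standalone sketch with an assumed 1280x1024 source on a 1920x1080 panel shows the resulting pillarbox geometry.

#include <stdio.h>

int main(void)
{
	unsigned int src_w = 1280, src_h = 1024;	/* pipe_src_w/h */
	unsigned int adj_w = 1920, adj_h = 1080;	/* adjusted_mode */
	unsigned int scaled_width = adj_w * src_h;	/* 1966080 */
	unsigned int scaled_height = src_w * adj_h;	/* 1382400 */

	if (scaled_width > scaled_height) {		/* pillarbox */
		unsigned int width = scaled_height / src_h;	/* 1350 */
		unsigned int x = (adj_w - width + 1) / 2;	/* 285 */

		printf("pillarbox: width %u, x %u\n", width, x);
	}
	return 0;
}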
@@ -171,20 +172,96 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
171 return (FACTOR * ratio + FACTOR/2) / FACTOR; 172 return (FACTOR * ratio + FACTOR/2) / FACTOR;
172} 173}
173 174
175static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
176 u32 *pfit_control)
177{
178 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
179 u32 scaled_width = adjusted_mode->hdisplay *
180 pipe_config->pipe_src_h;
181 u32 scaled_height = pipe_config->pipe_src_w *
182 adjusted_mode->vdisplay;
183
184 /* 965+ is easy, it does everything in hw */
185 if (scaled_width > scaled_height)
186 *pfit_control |= PFIT_ENABLE |
187 PFIT_SCALING_PILLAR;
188 else if (scaled_width < scaled_height)
189 *pfit_control |= PFIT_ENABLE |
190 PFIT_SCALING_LETTER;
191 else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w)
192 *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
193}
194
195static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
196 u32 *pfit_control, u32 *pfit_pgm_ratios,
197 u32 *border)
198{
199 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
200 u32 scaled_width = adjusted_mode->hdisplay *
201 pipe_config->pipe_src_h;
202 u32 scaled_height = pipe_config->pipe_src_w *
203 adjusted_mode->vdisplay;
204 u32 bits;
205
206 /*
207 * For earlier chips we have to calculate the scaling
208 * ratio by hand and program it into the
209 * PFIT_PGM_RATIO register
210 */
211 if (scaled_width > scaled_height) { /* pillar */
212 centre_horizontally(adjusted_mode,
213 scaled_height /
214 pipe_config->pipe_src_h);
215
216 *border = LVDS_BORDER_ENABLE;
217 if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) {
218 bits = panel_fitter_scaling(pipe_config->pipe_src_h,
219 adjusted_mode->vdisplay);
220
221 *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
222 bits << PFIT_VERT_SCALE_SHIFT);
223 *pfit_control |= (PFIT_ENABLE |
224 VERT_INTERP_BILINEAR |
225 HORIZ_INTERP_BILINEAR);
226 }
227 } else if (scaled_width < scaled_height) { /* letter */
228 centre_vertically(adjusted_mode,
229 scaled_width /
230 pipe_config->pipe_src_w);
231
232 *border = LVDS_BORDER_ENABLE;
233 if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
234 bits = panel_fitter_scaling(pipe_config->pipe_src_w,
235 adjusted_mode->hdisplay);
236
237 *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
238 bits << PFIT_VERT_SCALE_SHIFT);
239 *pfit_control |= (PFIT_ENABLE |
240 VERT_INTERP_BILINEAR |
241 HORIZ_INTERP_BILINEAR);
242 }
243 } else {
244 /* Aspects match, let hw scale both directions */
245 *pfit_control |= (PFIT_ENABLE |
246 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
247 VERT_INTERP_BILINEAR |
248 HORIZ_INTERP_BILINEAR);
249 }
250}
251
174void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, 252void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
175 struct intel_crtc_config *pipe_config, 253 struct intel_crtc_config *pipe_config,
176 int fitting_mode) 254 int fitting_mode)
177{ 255{
178 struct drm_device *dev = intel_crtc->base.dev; 256 struct drm_device *dev = intel_crtc->base.dev;
179 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 257 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
180 struct drm_display_mode *mode, *adjusted_mode; 258 struct drm_display_mode *adjusted_mode;
181 259
182 mode = &pipe_config->requested_mode;
183 adjusted_mode = &pipe_config->adjusted_mode; 260 adjusted_mode = &pipe_config->adjusted_mode;
184 261
185 /* Native modes don't need fitting */ 262 /* Native modes don't need fitting */
186 if (adjusted_mode->hdisplay == mode->hdisplay && 263 if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
187 adjusted_mode->vdisplay == mode->vdisplay) 264 adjusted_mode->vdisplay == pipe_config->pipe_src_h)
188 goto out; 265 goto out;
189 266
190 switch (fitting_mode) { 267 switch (fitting_mode) {
@@ -193,81 +270,25 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
193 * For centered modes, we have to calculate border widths & 270 * For centered modes, we have to calculate border widths &
194 * heights and modify the values programmed into the CRTC. 271 * heights and modify the values programmed into the CRTC.
195 */ 272 */
196 centre_horizontally(adjusted_mode, mode->hdisplay); 273 centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
197 centre_vertically(adjusted_mode, mode->vdisplay); 274 centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
198 border = LVDS_BORDER_ENABLE; 275 border = LVDS_BORDER_ENABLE;
199 break; 276 break;
200 case DRM_MODE_SCALE_ASPECT: 277 case DRM_MODE_SCALE_ASPECT:
201 /* Scale but preserve the aspect ratio */ 278 /* Scale but preserve the aspect ratio */
202 if (INTEL_INFO(dev)->gen >= 4) { 279 if (INTEL_INFO(dev)->gen >= 4)
203 u32 scaled_width = adjusted_mode->hdisplay * 280 i965_scale_aspect(pipe_config, &pfit_control);
204 mode->vdisplay; 281 else
205 u32 scaled_height = mode->hdisplay * 282 i9xx_scale_aspect(pipe_config, &pfit_control,
206 adjusted_mode->vdisplay; 283 &pfit_pgm_ratios, &border);
207
208 /* 965+ is easy, it does everything in hw */
209 if (scaled_width > scaled_height)
210 pfit_control |= PFIT_ENABLE |
211 PFIT_SCALING_PILLAR;
212 else if (scaled_width < scaled_height)
213 pfit_control |= PFIT_ENABLE |
214 PFIT_SCALING_LETTER;
215 else if (adjusted_mode->hdisplay != mode->hdisplay)
216 pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
217 } else {
218 u32 scaled_width = adjusted_mode->hdisplay *
219 mode->vdisplay;
220 u32 scaled_height = mode->hdisplay *
221 adjusted_mode->vdisplay;
222 /*
223 * For earlier chips we have to calculate the scaling
224 * ratio by hand and program it into the
225 * PFIT_PGM_RATIO register
226 */
227 if (scaled_width > scaled_height) { /* pillar */
228 centre_horizontally(adjusted_mode,
229 scaled_height /
230 mode->vdisplay);
231
232 border = LVDS_BORDER_ENABLE;
233 if (mode->vdisplay != adjusted_mode->vdisplay) {
234 u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
235 pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
236 bits << PFIT_VERT_SCALE_SHIFT);
237 pfit_control |= (PFIT_ENABLE |
238 VERT_INTERP_BILINEAR |
239 HORIZ_INTERP_BILINEAR);
240 }
241 } else if (scaled_width < scaled_height) { /* letter */
242 centre_vertically(adjusted_mode,
243 scaled_width /
244 mode->hdisplay);
245
246 border = LVDS_BORDER_ENABLE;
247 if (mode->hdisplay != adjusted_mode->hdisplay) {
248 u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
249 pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
250 bits << PFIT_VERT_SCALE_SHIFT);
251 pfit_control |= (PFIT_ENABLE |
252 VERT_INTERP_BILINEAR |
253 HORIZ_INTERP_BILINEAR);
254 }
255 } else {
256 /* Aspects match, Let hw scale both directions */
257 pfit_control |= (PFIT_ENABLE |
258 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
259 VERT_INTERP_BILINEAR |
260 HORIZ_INTERP_BILINEAR);
261 }
262 }
263 break; 284 break;
264 case DRM_MODE_SCALE_FULLSCREEN: 285 case DRM_MODE_SCALE_FULLSCREEN:
265 /* 286 /*
266 * Full scaling, even if it changes the aspect ratio. 287 * Full scaling, even if it changes the aspect ratio.
267 * Fortunately this is all done for us in hw. 288 * Fortunately this is all done for us in hw.
268 */ 289 */
269 if (mode->vdisplay != adjusted_mode->vdisplay || 290 if (pipe_config->pipe_src_h != adjusted_mode->vdisplay ||
270 mode->hdisplay != adjusted_mode->hdisplay) { 291 pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
271 pfit_control |= PFIT_ENABLE; 292 pfit_control |= PFIT_ENABLE;
272 if (INTEL_INFO(dev)->gen >= 4) 293 if (INTEL_INFO(dev)->gen >= 4)
273 pfit_control |= PFIT_SCALING_AUTO; 294 pfit_control |= PFIT_SCALING_AUTO;
@@ -308,7 +329,7 @@ static int is_backlight_combination_mode(struct drm_device *dev)
308{ 329{
309 struct drm_i915_private *dev_priv = dev->dev_private; 330 struct drm_i915_private *dev_priv = dev->dev_private;
310 331
311 if (INTEL_INFO(dev)->gen >= 4) 332 if (IS_GEN4(dev))
312 return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; 333 return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
313 334
314 if (IS_GEN2(dev)) 335 if (IS_GEN2(dev))
@@ -320,7 +341,7 @@ static int is_backlight_combination_mode(struct drm_device *dev)
320/* XXX: query mode clock or hardware clock and program max PWM appropriately 341/* XXX: query mode clock or hardware clock and program max PWM appropriately
321 * when it's 0. 342 * when it's 0.
322 */ 343 */
323static u32 i915_read_blc_pwm_ctl(struct drm_device *dev) 344static u32 i915_read_blc_pwm_ctl(struct drm_device *dev, enum pipe pipe)
324{ 345{
325 struct drm_i915_private *dev_priv = dev->dev_private; 346 struct drm_i915_private *dev_priv = dev->dev_private;
326 u32 val; 347 u32 val;
@@ -337,6 +358,21 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
337 val = dev_priv->regfile.saveBLC_PWM_CTL2; 358 val = dev_priv->regfile.saveBLC_PWM_CTL2;
338 I915_WRITE(BLC_PWM_PCH_CTL2, val); 359 I915_WRITE(BLC_PWM_PCH_CTL2, val);
339 } 360 }
361 } else if (IS_VALLEYVIEW(dev)) {
362 val = I915_READ(VLV_BLC_PWM_CTL(pipe));
363 if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
364 dev_priv->regfile.saveBLC_PWM_CTL = val;
365 dev_priv->regfile.saveBLC_PWM_CTL2 =
366 I915_READ(VLV_BLC_PWM_CTL2(pipe));
367 } else if (val == 0) {
368 val = dev_priv->regfile.saveBLC_PWM_CTL;
369 I915_WRITE(VLV_BLC_PWM_CTL(pipe), val);
370 I915_WRITE(VLV_BLC_PWM_CTL2(pipe),
371 dev_priv->regfile.saveBLC_PWM_CTL2);
372 }
373
374 if (!val)
375 val = 0x0f42ffff;
340 } else { 376 } else {
341 val = I915_READ(BLC_PWM_CTL); 377 val = I915_READ(BLC_PWM_CTL);
342 if (dev_priv->regfile.saveBLC_PWM_CTL == 0) { 378 if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
@@ -356,11 +392,12 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
356 return val; 392 return val;
357} 393}
358 394
359static u32 intel_panel_get_max_backlight(struct drm_device *dev) 395static u32 intel_panel_get_max_backlight(struct drm_device *dev,
396 enum pipe pipe)
360{ 397{
361 u32 max; 398 u32 max;
362 399
363 max = i915_read_blc_pwm_ctl(dev); 400 max = i915_read_blc_pwm_ctl(dev, pipe);
364 401
365 if (HAS_PCH_SPLIT(dev)) { 402 if (HAS_PCH_SPLIT(dev)) {
366 max >>= 16; 403 max >>= 16;
@@ -386,7 +423,8 @@ MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
386 "to dri-devel@lists.freedesktop.org, if your machine needs it. " 423 "to dri-devel@lists.freedesktop.org, if your machine needs it. "
387 "It will then be included in an upcoming module version."); 424 "It will then be included in an upcoming module version.");
388module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600); 425module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
389static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val) 426static u32 intel_panel_compute_brightness(struct drm_device *dev,
427 enum pipe pipe, u32 val)
390{ 428{
391 struct drm_i915_private *dev_priv = dev->dev_private; 429 struct drm_i915_private *dev_priv = dev->dev_private;
392 430
@@ -395,7 +433,7 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
395 433
396 if (i915_panel_invert_brightness > 0 || 434 if (i915_panel_invert_brightness > 0 ||
397 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { 435 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
398 u32 max = intel_panel_get_max_backlight(dev); 436 u32 max = intel_panel_get_max_backlight(dev, pipe);
399 if (max) 437 if (max)
400 return max - val; 438 return max - val;
401 } 439 }
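Worked example of the inversion above, as a standalone sketch: on a machine with the QUIRK_INVERT_BRIGHTNESS quirk and an assumed maximum of 255, a requested level of 200 is programmed as 55.

#include <stdio.h>

int main(void)
{
	unsigned int max = 255, val = 200;

	printf("programmed duty cycle = %u\n", max - val); /* 55 */
	return 0;
}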
@@ -403,18 +441,25 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
403 return val; 441 return val;
404} 442}
405 443
406static u32 intel_panel_get_backlight(struct drm_device *dev) 444static u32 intel_panel_get_backlight(struct drm_device *dev,
445 enum pipe pipe)
407{ 446{
408 struct drm_i915_private *dev_priv = dev->dev_private; 447 struct drm_i915_private *dev_priv = dev->dev_private;
409 u32 val; 448 u32 val;
410 unsigned long flags; 449 unsigned long flags;
450 int reg;
411 451
412 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 452 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
413 453
414 if (HAS_PCH_SPLIT(dev)) { 454 if (HAS_PCH_SPLIT(dev)) {
415 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 455 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
416 } else { 456 } else {
417 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 457 if (IS_VALLEYVIEW(dev))
458 reg = VLV_BLC_PWM_CTL(pipe);
459 else
460 reg = BLC_PWM_CTL;
461
462 val = I915_READ(reg) & BACKLIGHT_DUTY_CYCLE_MASK;
418 if (INTEL_INFO(dev)->gen < 4) 463 if (INTEL_INFO(dev)->gen < 4)
419 val >>= 1; 464 val >>= 1;
420 465
@@ -426,7 +471,7 @@ static u32 intel_panel_get_backlight(struct drm_device *dev)
426 } 471 }
427 } 472 }
428 473
429 val = intel_panel_compute_brightness(dev, val); 474 val = intel_panel_compute_brightness(dev, pipe, val);
430 475
431 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 476 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
432 477
@@ -441,19 +486,21 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
441 I915_WRITE(BLC_PWM_CPU_CTL, val | level); 486 I915_WRITE(BLC_PWM_CPU_CTL, val | level);
442} 487}
443 488
444static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level) 489static void intel_panel_actually_set_backlight(struct drm_device *dev,
490 enum pipe pipe, u32 level)
445{ 491{
446 struct drm_i915_private *dev_priv = dev->dev_private; 492 struct drm_i915_private *dev_priv = dev->dev_private;
447 u32 tmp; 493 u32 tmp;
494 int reg;
448 495
449 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); 496 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
450 level = intel_panel_compute_brightness(dev, level); 497 level = intel_panel_compute_brightness(dev, pipe, level);
451 498
452 if (HAS_PCH_SPLIT(dev)) 499 if (HAS_PCH_SPLIT(dev))
453 return intel_pch_panel_set_backlight(dev, level); 500 return intel_pch_panel_set_backlight(dev, level);
454 501
455 if (is_backlight_combination_mode(dev)) { 502 if (is_backlight_combination_mode(dev)) {
456 u32 max = intel_panel_get_max_backlight(dev); 503 u32 max = intel_panel_get_max_backlight(dev, pipe);
457 u8 lbpc; 504 u8 lbpc;
458 505
459 /* we're screwed, but keep behaviour backwards compatible */ 506 /* we're screwed, but keep behaviour backwards compatible */
@@ -465,23 +512,34 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
465 pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); 512 pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
466 } 513 }
467 514
468 tmp = I915_READ(BLC_PWM_CTL); 515 if (IS_VALLEYVIEW(dev))
516 reg = VLV_BLC_PWM_CTL(pipe);
517 else
518 reg = BLC_PWM_CTL;
519
520 tmp = I915_READ(reg);
469 if (INTEL_INFO(dev)->gen < 4) 521 if (INTEL_INFO(dev)->gen < 4)
470 level <<= 1; 522 level <<= 1;
471 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; 523 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
472 I915_WRITE(BLC_PWM_CTL, tmp | level); 524 I915_WRITE(reg, tmp | level);
473} 525}
474 526
475/* set backlight brightness to level in range [0..max] */ 527/* set backlight brightness to level in range [0..max] */
476void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max) 528void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
529 u32 max)
477{ 530{
531 struct drm_device *dev = connector->base.dev;
478 struct drm_i915_private *dev_priv = dev->dev_private; 532 struct drm_i915_private *dev_priv = dev->dev_private;
533 enum pipe pipe = intel_get_pipe_from_connector(connector);
479 u32 freq; 534 u32 freq;
480 unsigned long flags; 535 unsigned long flags;
481 536
537 if (pipe == INVALID_PIPE)
538 return;
539
482 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 540 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
483 541
484 freq = intel_panel_get_max_backlight(dev); 542 freq = intel_panel_get_max_backlight(dev, pipe);
485 if (!freq) { 543 if (!freq) {
486 /* we are screwed, bail out */ 544 /* we are screwed, bail out */
487 goto out; 545 goto out;
@@ -498,16 +556,21 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
498 dev_priv->backlight.device->props.brightness = level; 556 dev_priv->backlight.device->props.brightness = level;
499 557
500 if (dev_priv->backlight.enabled) 558 if (dev_priv->backlight.enabled)
501 intel_panel_actually_set_backlight(dev, level); 559 intel_panel_actually_set_backlight(dev, pipe, level);
502out: 560out:
503 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 561 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
504} 562}
505 563
506void intel_panel_disable_backlight(struct drm_device *dev) 564void intel_panel_disable_backlight(struct intel_connector *connector)
507{ 565{
566 struct drm_device *dev = connector->base.dev;
508 struct drm_i915_private *dev_priv = dev->dev_private; 567 struct drm_i915_private *dev_priv = dev->dev_private;
568 enum pipe pipe = intel_get_pipe_from_connector(connector);
509 unsigned long flags; 569 unsigned long flags;
510 570
571 if (pipe == INVALID_PIPE)
572 return;
573
511 /* 574 /*
512 * Do not disable backlight on the vgaswitcheroo path. When switching 575 * Do not disable backlight on the vgaswitcheroo path. When switching
513 * away from i915, the other client may depend on i915 to handle the 576 * away from i915, the other client may depend on i915 to handle the
@@ -522,12 +585,17 @@ void intel_panel_disable_backlight(struct drm_device *dev)
522 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 585 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
523 586
524 dev_priv->backlight.enabled = false; 587 dev_priv->backlight.enabled = false;
525 intel_panel_actually_set_backlight(dev, 0); 588 intel_panel_actually_set_backlight(dev, pipe, 0);
526 589
527 if (INTEL_INFO(dev)->gen >= 4) { 590 if (INTEL_INFO(dev)->gen >= 4) {
528 uint32_t reg, tmp; 591 uint32_t reg, tmp;
529 592
530 reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; 593 if (HAS_PCH_SPLIT(dev))
594 reg = BLC_PWM_CPU_CTL2;
595 else if (IS_VALLEYVIEW(dev))
596 reg = VLV_BLC_PWM_CTL2(pipe);
597 else
598 reg = BLC_PWM_CTL2;
531 599
532 I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE); 600 I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
533 601
@@ -541,18 +609,25 @@ void intel_panel_disable_backlight(struct drm_device *dev)
541 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 609 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
542} 610}
543 611
544void intel_panel_enable_backlight(struct drm_device *dev, 612void intel_panel_enable_backlight(struct intel_connector *connector)
545 enum pipe pipe)
546{ 613{
614 struct drm_device *dev = connector->base.dev;
547 struct drm_i915_private *dev_priv = dev->dev_private; 615 struct drm_i915_private *dev_priv = dev->dev_private;
616 enum pipe pipe = intel_get_pipe_from_connector(connector);
548 enum transcoder cpu_transcoder = 617 enum transcoder cpu_transcoder =
549 intel_pipe_to_cpu_transcoder(dev_priv, pipe); 618 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
550 unsigned long flags; 619 unsigned long flags;
551 620
621 if (pipe == INVALID_PIPE)
622 return;
623
624 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
625
552 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 626 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
553 627
554 if (dev_priv->backlight.level == 0) { 628 if (dev_priv->backlight.level == 0) {
555 dev_priv->backlight.level = intel_panel_get_max_backlight(dev); 629 dev_priv->backlight.level = intel_panel_get_max_backlight(dev,
630 pipe);
556 if (dev_priv->backlight.device) 631 if (dev_priv->backlight.device)
557 dev_priv->backlight.device->props.brightness = 632 dev_priv->backlight.device->props.brightness =
558 dev_priv->backlight.level; 633 dev_priv->backlight.level;
@@ -561,8 +636,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
561 if (INTEL_INFO(dev)->gen >= 4) { 636 if (INTEL_INFO(dev)->gen >= 4) {
562 uint32_t reg, tmp; 637 uint32_t reg, tmp;
563 638
564 reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; 639 if (HAS_PCH_SPLIT(dev))
565 640 reg = BLC_PWM_CPU_CTL2;
641 else if (IS_VALLEYVIEW(dev))
642 reg = VLV_BLC_PWM_CTL2(pipe);
643 else
644 reg = BLC_PWM_CTL2;
566 645
567 tmp = I915_READ(reg); 646 tmp = I915_READ(reg);
568 647
@@ -602,16 +681,41 @@ set_level:
602 * registers are set. 681 * registers are set.
603 */ 682 */
604 dev_priv->backlight.enabled = true; 683 dev_priv->backlight.enabled = true;
605 intel_panel_actually_set_backlight(dev, dev_priv->backlight.level); 684 intel_panel_actually_set_backlight(dev, pipe,
685 dev_priv->backlight.level);
606 686
607 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 687 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
608} 688}
609 689
690/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
691static void intel_panel_init_backlight_regs(struct drm_device *dev)
692{
693 struct drm_i915_private *dev_priv = dev->dev_private;
694
695 if (IS_VALLEYVIEW(dev)) {
696 enum pipe pipe;
697
698 for_each_pipe(pipe) {
699 u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
700
701 /* Skip if the modulation freq is already set */
702 if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
703 continue;
704
705 cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
706 I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
707 cur_val);
708 }
709 }
710}
711
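Illustration (standalone sketch) of the register split assumed by the loop above: on VLV the high 16 bits of the per-pipe BLC_PWM_CTL hold the modulation frequency and the low 16 bits the duty cycle (assuming BACKLIGHT_DUTY_CYCLE_MASK is 0xffff), so a register carrying only a duty cycle gets the 0xf42 default frequency merged in.

#include <stdio.h>

int main(void)
{
	unsigned int cur_val = 0x00001fff;	/* freq unset, duty cycle set */
	unsigned int val = (0xf42u << 16) | (cur_val & 0xffff);

	printf("VLV_BLC_PWM_CTL = 0x%08x\n", val); /* 0x0f421fff */
	return 0;
}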
610static void intel_panel_init_backlight(struct drm_device *dev) 712static void intel_panel_init_backlight(struct drm_device *dev)
611{ 713{
612 struct drm_i915_private *dev_priv = dev->dev_private; 714 struct drm_i915_private *dev_priv = dev->dev_private;
613 715
614 dev_priv->backlight.level = intel_panel_get_backlight(dev); 716 intel_panel_init_backlight_regs(dev);
717
718 dev_priv->backlight.level = intel_panel_get_backlight(dev, 0);
615 dev_priv->backlight.enabled = dev_priv->backlight.level != 0; 719 dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
616} 720}
617 721
@@ -637,19 +741,34 @@ intel_panel_detect(struct drm_device *dev)
637 } 741 }
638} 742}
639 743
640#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 744#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
641static int intel_panel_update_status(struct backlight_device *bd) 745static int intel_panel_update_status(struct backlight_device *bd)
642{ 746{
643 struct drm_device *dev = bl_get_data(bd); 747 struct intel_connector *connector = bl_get_data(bd);
644 intel_panel_set_backlight(dev, bd->props.brightness, 748 struct drm_device *dev = connector->base.dev;
749
750 mutex_lock(&dev->mode_config.mutex);
751 DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
752 bd->props.brightness, bd->props.max_brightness);
753 intel_panel_set_backlight(connector, bd->props.brightness,
645 bd->props.max_brightness); 754 bd->props.max_brightness);
755 mutex_unlock(&dev->mode_config.mutex);
646 return 0; 756 return 0;
647} 757}
648 758
649static int intel_panel_get_brightness(struct backlight_device *bd) 759static int intel_panel_get_brightness(struct backlight_device *bd)
650{ 760{
651 struct drm_device *dev = bl_get_data(bd); 761 struct intel_connector *connector = bl_get_data(bd);
652 return intel_panel_get_backlight(dev); 762 struct drm_device *dev = connector->base.dev;
763 enum pipe pipe;
764
765 mutex_lock(&dev->mode_config.mutex);
766 pipe = intel_get_pipe_from_connector(connector);
767 mutex_unlock(&dev->mode_config.mutex);
768 if (pipe == INVALID_PIPE)
769 return 0;
770
771 return intel_panel_get_backlight(connector->base.dev, pipe);
653} 772}
654 773
655static const struct backlight_ops intel_panel_bl_ops = { 774static const struct backlight_ops intel_panel_bl_ops = {
@@ -674,7 +793,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
674 props.brightness = dev_priv->backlight.level; 793 props.brightness = dev_priv->backlight.level;
675 794
676 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 795 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
677 props.max_brightness = intel_panel_get_max_backlight(dev); 796 props.max_brightness = intel_panel_get_max_backlight(dev, 0);
678 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 797 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
679 798
680 if (props.max_brightness == 0) { 799 if (props.max_brightness == 0) {
@@ -683,7 +802,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
683 } 802 }
684 dev_priv->backlight.device = 803 dev_priv->backlight.device =
685 backlight_device_register("intel_backlight", 804 backlight_device_register("intel_backlight",
686 &connector->kdev, dev, 805 connector->kdev,
806 to_intel_connector(connector),
687 &intel_panel_bl_ops, &props); 807 &intel_panel_bl_ops, &props);
688 808
689 if (IS_ERR(dev_priv->backlight.device)) { 809 if (IS_ERR(dev_priv->backlight.device)) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 26c2ea3e985c..0a07d7c9cafc 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -32,6 +32,27 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <drm/i915_powerwell.h> 33#include <drm/i915_powerwell.h>
34 34
35/**
36 * RC6 is a special power stage which allows the GPU to enter a very
37 * low-voltage mode when idle, using down to 0V while at this stage. This
38 * stage is entered automatically when the GPU is idle and RC6 support is
39 * enabled, and as soon as a new workload arises the GPU wakes up again.
40 *
41 * There are different RC6 modes available on Intel GPUs, which differ in
42 * the latency required to enter and leave RC6 and in the voltage consumed
43 * by the GPU in the different states.
44 *
45 * The combination of the following flags defines which states the GPU is
46 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
47 * RC6pp is the deepest RC6. Their support by hardware varies according to
48 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the
49 * one which brings the most power savings; deeper states save more power,
50 * but require higher latency to switch to and wake up.
51 */
52#define INTEL_RC6_ENABLE (1<<0)
53#define INTEL_RC6p_ENABLE (1<<1)
54#define INTEL_RC6pp_ENABLE (1<<2)
55
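A minimal illustration (not from the patch) of how these flags combine into the rc6 mode mask the driver checks, here enabling plain RC6 and deep RC6 but not RC6pp.

#include <stdio.h>

#define INTEL_RC6_ENABLE   (1 << 0)
#define INTEL_RC6p_ENABLE  (1 << 1)
#define INTEL_RC6pp_ENABLE (1 << 2)

int main(void)
{
	unsigned int mode = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;

	printf("RC6 %s, RC6p %s, RC6pp %s\n",
	       (mode & INTEL_RC6_ENABLE) ? "on" : "off",
	       (mode & INTEL_RC6p_ENABLE) ? "on" : "off",
	       (mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
	return 0;
}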
35/* FBC, or Frame Buffer Compression, is a technique employed to compress the 56/* FBC, or Frame Buffer Compression, is a technique employed to compress the
36 * framebuffer contents in-memory, aiming at reducing the required bandwidth 57 * framebuffer contents in-memory, aiming at reducing the required bandwidth
37 * during in-memory transfers and, therefore, the power consumption. 58 * during in-memory transfers and, therefore, the power consumption.
@@ -43,14 +64,6 @@
43 * i915.i915_enable_fbc parameter 64 * i915.i915_enable_fbc parameter
44 */ 65 */
45 66
46static bool intel_crtc_active(struct drm_crtc *crtc)
47{
48 /* Be paranoid as we can arrive here with only partial
49 * state retrieved from the hardware during setup.
50 */
51 return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
52}
53
54static void i8xx_disable_fbc(struct drm_device *dev) 67static void i8xx_disable_fbc(struct drm_device *dev)
55{ 68{
56 struct drm_i915_private *dev_priv = dev->dev_private; 69 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -241,18 +254,6 @@ static void ironlake_disable_fbc(struct drm_device *dev)
241 dpfc_ctl &= ~DPFC_CTL_EN; 254 dpfc_ctl &= ~DPFC_CTL_EN;
242 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); 255 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
243 256
244 if (IS_IVYBRIDGE(dev))
245 /* WaFbcDisableDpfcClockGating:ivb */
246 I915_WRITE(ILK_DSPCLK_GATE_D,
247 I915_READ(ILK_DSPCLK_GATE_D) &
248 ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
249
250 if (IS_HASWELL(dev))
251 /* WaFbcDisableDpfcClockGating:hsw */
252 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
253 I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
254 ~HSW_DPFC_GATING_DISABLE);
255
256 DRM_DEBUG_KMS("disabled FBC\n"); 257 DRM_DEBUG_KMS("disabled FBC\n");
257 } 258 }
258} 259}
@@ -282,18 +283,10 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
282 if (IS_IVYBRIDGE(dev)) { 283 if (IS_IVYBRIDGE(dev)) {
283 /* WaFbcAsynchFlipDisableFbcQueue:ivb */ 284 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
284 I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS); 285 I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
285 /* WaFbcDisableDpfcClockGating:ivb */
286 I915_WRITE(ILK_DSPCLK_GATE_D,
287 I915_READ(ILK_DSPCLK_GATE_D) |
288 ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
289 } else { 286 } else {
290 /* WaFbcAsynchFlipDisableFbcQueue:hsw */ 287 /* WaFbcAsynchFlipDisableFbcQueue:hsw */
291 I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe), 288 I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
292 HSW_BYPASS_FBC_QUEUE); 289 HSW_BYPASS_FBC_QUEUE);
293 /* WaFbcDisableDpfcClockGating:hsw */
294 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
295 I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
296 HSW_DPFC_GATING_DISABLE);
297 } 290 }
298 291
299 I915_WRITE(SNB_DPFC_CTL_SA, 292 I915_WRITE(SNB_DPFC_CTL_SA,
@@ -378,7 +371,7 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
378 371
379 intel_cancel_fbc_work(dev_priv); 372 intel_cancel_fbc_work(dev_priv);
380 373
381 work = kzalloc(sizeof *work, GFP_KERNEL); 374 work = kzalloc(sizeof(*work), GFP_KERNEL);
382 if (work == NULL) { 375 if (work == NULL) {
383 DRM_ERROR("Failed to allocate FBC work structure\n"); 376 DRM_ERROR("Failed to allocate FBC work structure\n");
384 dev_priv->display.enable_fbc(crtc, interval); 377 dev_priv->display.enable_fbc(crtc, interval);
@@ -458,7 +451,8 @@ void intel_update_fbc(struct drm_device *dev)
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj;
-	unsigned int max_hdisplay, max_vdisplay;
+	const struct drm_display_mode *adjusted_mode;
+	unsigned int max_width, max_height;
 
 	if (!I915_HAS_FBC(dev)) {
 		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
@@ -482,7 +476,7 @@ void intel_update_fbc(struct drm_device *dev)
 	 */
 	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
 		if (intel_crtc_active(tmp_crtc) &&
-		    !to_intel_crtc(tmp_crtc)->primary_disabled) {
+		    to_intel_crtc(tmp_crtc)->primary_enabled) {
 			if (crtc) {
 				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
 					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
@@ -502,6 +496,7 @@ void intel_update_fbc(struct drm_device *dev)
 	fb = crtc->fb;
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
+	adjusted_mode = &intel_crtc->config.adjusted_mode;
 
 	if (i915_enable_fbc < 0 &&
 	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
@@ -514,8 +509,8 @@ void intel_update_fbc(struct drm_device *dev)
 		DRM_DEBUG_KMS("fbc disabled per module param\n");
 		goto out_disable;
 	}
-	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
-	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
 		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
 			DRM_DEBUG_KMS("mode incompatible with compression, "
 				      "disabling\n");
@@ -523,14 +518,14 @@ void intel_update_fbc(struct drm_device *dev)
 	}
 
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
-		max_hdisplay = 4096;
-		max_vdisplay = 2048;
+		max_width = 4096;
+		max_height = 2048;
 	} else {
-		max_hdisplay = 2048;
-		max_vdisplay = 1536;
+		max_width = 2048;
+		max_height = 1536;
 	}
-	if ((crtc->mode.hdisplay > max_hdisplay) ||
-	    (crtc->mode.vdisplay > max_vdisplay)) {
+	if (intel_crtc->config.pipe_src_w > max_width ||
+	    intel_crtc->config.pipe_src_h > max_height) {
 		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
 			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
 		goto out_disable;
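Note the switch from crtc->mode.hdisplay/vdisplay to pipe_src_w/h: with panel fitting in play, the pipe source size is what FBC actually compresses, not the scaled display timings. The gate itself reduces to a small pure function; a sketch in which gen and is_g4x stand in for the INTEL_INFO()/IS_G4X() checks:

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the gen-dependent FBC size gate above. */
    static bool fbc_size_ok(int gen, bool is_g4x,
                            unsigned int src_w, unsigned int src_h)
    {
        unsigned int max_width, max_height;

        if (is_g4x || gen >= 5) {
            max_width = 4096;
            max_height = 2048;
        } else {
            max_width = 2048;
            max_height = 1536;
        }
        return src_w <= max_width && src_h <= max_height;
    }

    int main(void)
    {
        printf("gen4 2560x1600: %d\n", fbc_size_ok(4, false, 2560, 1600));
        printf("gen6 2560x1600: %d\n", fbc_size_ok(6, false, 2560, 1600));
        return 0;
    }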
@@ -1087,8 +1082,9 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
 	return enabled;
 }
 
-static void pineview_update_wm(struct drm_device *dev)
+static void pineview_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	const struct cxsr_latency *latency;
@@ -1105,8 +1101,12 @@ static void pineview_update_wm(struct drm_device *dev)
 
 	crtc = single_enabled_crtc(dev);
 	if (crtc) {
-		int clock = crtc->mode.clock;
+		const struct drm_display_mode *adjusted_mode;
 		int pixel_size = crtc->fb->bits_per_pixel / 8;
+		int clock;
+
+		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+		clock = adjusted_mode->crtc_clock;
 
 		/* Display SR */
 		wm = intel_calculate_wm(clock, &pineview_display_wm,
@@ -1166,6 +1166,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
 			    int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	int htotal, hdisplay, clock, pixel_size;
 	int line_time_us, line_count;
 	int entries, tlb_miss;
@@ -1177,9 +1178,10 @@ static bool g4x_compute_wm0(struct drm_device *dev,
 		return false;
 	}
 
-	htotal = crtc->mode.htotal;
-	hdisplay = crtc->mode.hdisplay;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->crtc_clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	/* Use the small buffer method to calculate plane watermark */
@@ -1250,6 +1252,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
 			     int *display_wm, int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	int hdisplay, htotal, pixel_size, clock;
 	unsigned long line_time_us;
 	int line_count, line_size;
@@ -1262,9 +1265,10 @@ static bool g4x_compute_srwm(struct drm_device *dev,
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	hdisplay = crtc->mode.hdisplay;
-	htotal = crtc->mode.htotal;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->crtc_clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	line_time_us = (htotal * 1000) / clock;
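The context line `line_time_us = (htotal * 1000) / clock` is the heart of the large-buffer self-refresh method: with the clock in kHz, one scanline takes htotal*1000/clock microseconds, and dividing the memory latency by that tells how many scanlines the FIFO must hold. A standalone sketch with illustrative numbers; the round-up here is ours and not necessarily the driver's exact rounding:

    #include <stdio.h>

    int main(void)
    {
        long htotal = 2200;       /* total pixels per scanline */
        long clock = 148500;      /* pixel clock in kHz */
        long hdisplay = 1920;     /* visible pixels per line */
        long pixel_size = 4;      /* bytes per pixel */
        long latency_ns = 12000;  /* memory self-refresh latency */

        /* kHz clock: one line takes htotal*1000/clock microseconds */
        long line_time_us = (htotal * 1000) / clock;
        long line_count = (latency_ns / 1000 + line_time_us - 1) / line_time_us;
        long line_size = hdisplay * pixel_size;

        printf("line time %ld us, need %ld line(s) = %ld bytes buffered\n",
               line_time_us, line_count, line_count * line_size);
        return 0;
    }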
@@ -1303,7 +1307,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
 	if (!intel_crtc_active(crtc))
 		return false;
 
-	clock = crtc->mode.clock;	/* VESA DOT Clock */
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */
 
 	entries = (clock / 1000) * pixel_size;
@@ -1365,8 +1369,9 @@ static void vlv_update_drain_latency(struct drm_device *dev)
 
 #define single_plane_enabled(mask) is_power_of_2(mask)
 
-static void valleyview_update_wm(struct drm_device *dev)
+static void valleyview_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	static const int sr_latency_ns = 12000;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1424,8 +1429,9 @@ static void valleyview_update_wm(struct drm_device *dev)
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void g4x_update_wm(struct drm_device *dev)
+static void g4x_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	static const int sr_latency_ns = 12000;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1476,8 +1482,9 @@ static void g4x_update_wm(struct drm_device *dev)
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i965_update_wm(struct drm_device *dev)
+static void i965_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	int srwm = 1;
@@ -1488,9 +1495,11 @@ static void i965_update_wm(struct drm_device *dev)
 	if (crtc) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 12000;
-		int clock = crtc->mode.clock;
-		int htotal = crtc->mode.htotal;
-		int hdisplay = crtc->mode.hdisplay;
+		const struct drm_display_mode *adjusted_mode =
+			&to_intel_crtc(crtc)->config.adjusted_mode;
+		int clock = adjusted_mode->crtc_clock;
+		int htotal = adjusted_mode->htotal;
+		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 		int pixel_size = crtc->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
 		int entries;
@@ -1541,8 +1550,9 @@ static void i965_update_wm(struct drm_device *dev)
 	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i9xx_update_wm(struct drm_device *dev)
+static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct intel_watermark_params *wm_info;
 	uint32_t fwater_lo;
@@ -1562,11 +1572,13 @@ static void i9xx_update_wm(struct drm_device *dev)
 	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
 	crtc = intel_get_crtc_for_plane(dev, 0);
 	if (intel_crtc_active(crtc)) {
+		const struct drm_display_mode *adjusted_mode;
 		int cpp = crtc->fb->bits_per_pixel / 8;
 		if (IS_GEN2(dev))
 			cpp = 4;
 
-		planea_wm = intel_calculate_wm(crtc->mode.clock,
+		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 					       wm_info, fifo_size, cpp,
 					       latency_ns);
 		enabled = crtc;
@@ -1576,11 +1588,13 @@ static void i9xx_update_wm(struct drm_device *dev)
 	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
 	crtc = intel_get_crtc_for_plane(dev, 1);
 	if (intel_crtc_active(crtc)) {
+		const struct drm_display_mode *adjusted_mode;
 		int cpp = crtc->fb->bits_per_pixel / 8;
 		if (IS_GEN2(dev))
 			cpp = 4;
 
-		planeb_wm = intel_calculate_wm(crtc->mode.clock,
+		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 					       wm_info, fifo_size, cpp,
 					       latency_ns);
 		if (enabled == NULL)
@@ -1607,9 +1621,11 @@ static void i9xx_update_wm(struct drm_device *dev)
 	if (HAS_FW_BLC(dev) && enabled) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 6000;
-		int clock = enabled->mode.clock;
-		int htotal = enabled->mode.htotal;
-		int hdisplay = enabled->mode.hdisplay;
+		const struct drm_display_mode *adjusted_mode =
+			&to_intel_crtc(enabled)->config.adjusted_mode;
+		int clock = adjusted_mode->crtc_clock;
+		int htotal = adjusted_mode->htotal;
+		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 		int pixel_size = enabled->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
 		int entries;
@@ -1658,10 +1674,12 @@ static void i9xx_update_wm(struct drm_device *dev)
 	}
 }
 
-static void i830_update_wm(struct drm_device *dev)
+static void i830_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	uint32_t fwater_lo;
 	int planea_wm;
 
@@ -1669,7 +1687,9 @@ static void i830_update_wm(struct drm_device *dev)
 	if (crtc == NULL)
 		return;
 
-	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
+				       &i830_wm_info,
 				       dev_priv->display.get_fifo_size(dev, 0),
 				       4, latency_ns);
 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1741,6 +1761,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
 				  int *fbc_wm, int *display_wm, int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	unsigned long line_time_us;
 	int hdisplay, htotal, pixel_size, clock;
 	int line_count, line_size;
@@ -1753,9 +1774,10 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	hdisplay = crtc->mode.hdisplay;
-	htotal = crtc->mode.htotal;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->crtc_clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	line_time_us = (htotal * 1000) / clock;
@@ -1785,8 +1807,9 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
 				   display, cursor);
 }
 
-static void ironlake_update_wm(struct drm_device *dev)
+static void ironlake_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int fbc_wm, plane_wm, cursor_wm;
 	unsigned int enabled;
@@ -1868,8 +1891,9 @@ static void ironlake_update_wm(struct drm_device *dev)
 	 */
 }
 
-static void sandybridge_update_wm(struct drm_device *dev)
+static void sandybridge_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
 	u32 val;
@@ -1970,8 +1994,9 @@ static void sandybridge_update_wm(struct drm_device *dev)
 			      cursor_wm);
 }
 
-static void ivybridge_update_wm(struct drm_device *dev)
+static void ivybridge_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
 	u32 val;
@@ -2098,7 +2123,7 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pixel_rate;
 
-	pixel_rate = intel_crtc->config.adjusted_mode.clock;
+	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
 
 	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
 	 * adjust the pixel_rate here. */
@@ -2107,8 +2132,8 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
 		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
 		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
 
-		pipe_w = intel_crtc->config.requested_mode.hdisplay;
-		pipe_h = intel_crtc->config.requested_mode.vdisplay;
+		pipe_w = intel_crtc->config.pipe_src_w;
+		pipe_h = intel_crtc->config.pipe_src_h;
 		pfit_w = (pfit_size >> 16) & 0xFFFF;
 		pfit_h = pfit_size & 0xFFFF;
 		if (pipe_w < pfit_w)
@@ -2176,27 +2201,18 @@ struct hsw_wm_maximums {
 	uint16_t fbc;
 };
 
-struct hsw_wm_values {
-	uint32_t wm_pipe[3];
-	uint32_t wm_lp[3];
-	uint32_t wm_lp_spr[3];
-	uint32_t wm_linetime[3];
-	bool enable_fbc_wm;
-};
-
 /* used in computing the new watermarks state */
 struct intel_wm_config {
 	unsigned int num_pipes_active;
 	bool sprites_enabled;
 	bool sprites_scaled;
-	bool fbc_wm_enabled;
 };
 
 /*
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value,
 				   bool is_lp)
 {
@@ -2225,7 +2241,7 @@ static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	uint32_t method1, method2;
@@ -2248,7 +2264,7 @@ static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	if (!params->active || !params->cur.enabled)
@@ -2262,7 +2278,7 @@ static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
 }
 
 /* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t pri_val)
 {
 	if (!params->active || !params->pri.enabled)
@@ -2275,7 +2291,9 @@ static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
 
 static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
 {
-	if (INTEL_INFO(dev)->gen >= 7)
+	if (INTEL_INFO(dev)->gen >= 8)
+		return 3072;
+	else if (INTEL_INFO(dev)->gen >= 7)
 		return 768;
 	else
 		return 512;
@@ -2320,7 +2338,9 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
 	}
 
 	/* clamp to max that the registers can hold */
-	if (INTEL_INFO(dev)->gen >= 7)
+	if (INTEL_INFO(dev)->gen >= 8)
+		max = level == 0 ? 255 : 2047;
+	else if (INTEL_INFO(dev)->gen >= 7)
 		/* IVB/HSW primary/sprite plane watermarks */
 		max = level == 0 ? 127 : 1023;
 	else if (!is_sprite)
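Together with the FIFO-size hunk above, this is the gen8 (Broadwell) widening: 768 -> 3072 FIFO entries and 127/1023 -> 255/2047 watermark register fields. The clamp reduces to a pure function; a sketch in which the helper name is ours and only the branches visible in these hunks are reproduced:

    #include <stdio.h>

    /* 'gen' stands in for INTEL_INFO(dev)->gen; older gens are handled
     * by branches not shown in this hunk. */
    static unsigned int plane_wm_reg_max(int gen, int level)
    {
        if (gen >= 8)          /* BDW: wider watermark fields */
            return level == 0 ? 255 : 2047;
        if (gen >= 7)          /* IVB/HSW primary/sprite planes */
            return level == 0 ? 127 : 1023;
        return 0;              /* placeholder for gens outside this hunk */
    }

    int main(void)
    {
        printf("gen8 LP max %u, gen7 LP max %u\n",
               plane_wm_reg_max(8, 1), plane_wm_reg_max(7, 1));
        return 0;
    }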
@@ -2350,27 +2370,30 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
 }
 
 /* Calculate the maximum FBC watermark */
-static unsigned int ilk_fbc_wm_max(void)
+static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
 {
 	/* max that registers can hold */
-	return 15;
+	if (INTEL_INFO(dev)->gen >= 8)
+		return 31;
+	else
+		return 15;
 }
 
-static void ilk_wm_max(struct drm_device *dev,
-		       int level,
-		       const struct intel_wm_config *config,
-		       enum intel_ddb_partitioning ddb_partitioning,
-		       struct hsw_wm_maximums *max)
+static void ilk_compute_wm_maximums(struct drm_device *dev,
+				    int level,
+				    const struct intel_wm_config *config,
+				    enum intel_ddb_partitioning ddb_partitioning,
+				    struct hsw_wm_maximums *max)
 {
 	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
 	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
 	max->cur = ilk_cursor_wm_max(dev, level, config);
-	max->fbc = ilk_fbc_wm_max();
+	max->fbc = ilk_fbc_wm_max(dev);
 }
 
-static bool ilk_check_wm(int level,
-			 const struct hsw_wm_maximums *max,
-			 struct intel_wm_level *result)
+static bool ilk_validate_wm_level(int level,
+				  const struct hsw_wm_maximums *max,
+				  struct intel_wm_level *result)
 {
 	bool ret;
 
@@ -2406,14 +2429,12 @@ static bool ilk_check_wm(int level,
 		result->enable = true;
 	}
 
-	DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
-
 	return ret;
 }
 
 static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
 				 int level,
-				 struct hsw_pipe_wm_parameters *p,
+				 const struct hsw_pipe_wm_parameters *p,
 				 struct intel_wm_level *result)
 {
 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2434,55 +2455,6 @@ static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
 	result->enable = true;
 }
 
-static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
-			      int level, struct hsw_wm_maximums *max,
-			      struct hsw_pipe_wm_parameters *params,
-			      struct intel_wm_level *result)
-{
-	enum pipe pipe;
-	struct intel_wm_level res[3];
-
-	for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
-		ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
-
-	result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
-	result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
-	result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
-	result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
-	result->enable = true;
-
-	return ilk_check_wm(level, max, result);
-}
-
-static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
-				    enum pipe pipe,
-				    struct hsw_pipe_wm_parameters *params)
-{
-	uint32_t pri_val, cur_val, spr_val;
-	/* WM0 latency values stored in 0.1us units */
-	uint16_t pri_latency = dev_priv->wm.pri_latency[0];
-	uint16_t spr_latency = dev_priv->wm.spr_latency[0];
-	uint16_t cur_latency = dev_priv->wm.cur_latency[0];
-
-	pri_val = ilk_compute_pri_wm(params, pri_latency, false);
-	spr_val = ilk_compute_spr_wm(params, spr_latency);
-	cur_val = ilk_compute_cur_wm(params, cur_latency);
-
-	WARN(pri_val > 127,
-	     "Primary WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
-	WARN(spr_val > 127,
-	     "Sprite WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
-	WARN(cur_val > 63,
-	     "Cursor WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
-
-	return (pri_val << WM0_PIPE_PLANE_SHIFT) |
-	       (spr_val << WM0_PIPE_SPRITE_SHIFT) |
-	       cur_val;
-}
-
 static uint32_t
 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
 {
@@ -2554,19 +2526,22 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
 		wm[3] *= 2;
 }
 
-static void intel_print_wm_latency(struct drm_device *dev,
-				   const char *name,
-				   const uint16_t wm[5])
+static int ilk_wm_max_level(const struct drm_device *dev)
 {
-	int level, max_level;
-
 	/* how many WM levels are we expecting */
 	if (IS_HASWELL(dev))
-		max_level = 4;
+		return 4;
 	else if (INTEL_INFO(dev)->gen >= 6)
-		max_level = 3;
+		return 3;
 	else
-		max_level = 2;
+		return 2;
+}
+
+static void intel_print_wm_latency(struct drm_device *dev,
+				   const char *name,
+				   const uint16_t wm[5])
+{
+	int level, max_level = ilk_wm_max_level(dev);
 
 	for (level = 0; level <= max_level; level++) {
 		unsigned int latency = wm[level];
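Factoring the level count into ilk_wm_max_level() lets the printing, merging, and validation paths below all iterate the same way. A sketch of the helper and the iteration idiom, with is_haswell/gen standing in for the driver's platform checks:

    #include <stdio.h>

    static int wm_max_level(int gen, int is_haswell)
    {
        if (is_haswell)
            return 4;       /* levels 0..4 on HSW */
        else if (gen >= 6)
            return 3;
        else
            return 2;
    }

    int main(void)
    {
        int level, max_level = wm_max_level(7, 1);

        for (level = 0; level <= max_level; level++)
            printf("would compute/print WM level %d\n", level);
        return 0;
    }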
@@ -2606,218 +2581,321 @@ static void intel_setup_wm_latency(struct drm_device *dev)
 	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
 }
 
-static void hsw_compute_wm_parameters(struct drm_device *dev,
-				      struct hsw_pipe_wm_parameters *params,
-				      struct hsw_wm_maximums *lp_max_1_2,
-				      struct hsw_wm_maximums *lp_max_5_6)
+static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
+				      struct hsw_pipe_wm_parameters *p,
+				      struct intel_wm_config *config)
 {
-	struct drm_crtc *crtc;
+	struct drm_device *dev = crtc->dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum pipe pipe = intel_crtc->pipe;
 	struct drm_plane *plane;
-	enum pipe pipe;
-	struct intel_wm_config config = {};
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		struct hsw_pipe_wm_parameters *p;
-
-		pipe = intel_crtc->pipe;
-		p = &params[pipe];
-
-		p->active = intel_crtc_active(crtc);
-		if (!p->active)
-			continue;
-
-		config.num_pipes_active++;
 
+	p->active = intel_crtc_active(crtc);
+	if (p->active) {
 		p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
 		p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
 		p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
 		p->cur.bytes_per_pixel = 4;
-		p->pri.horiz_pixels =
-			intel_crtc->config.requested_mode.hdisplay;
+		p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
 		p->cur.horiz_pixels = 64;
 		/* TODO: for now, assume primary and cursor planes are always enabled. */
 		p->pri.enabled = true;
 		p->cur.enabled = true;
 	}
 
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		config->num_pipes_active += intel_crtc_active(crtc);
+
 	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
 		struct intel_plane *intel_plane = to_intel_plane(plane);
-		struct hsw_pipe_wm_parameters *p;
 
-		pipe = intel_plane->pipe;
-		p = &params[pipe];
+		if (intel_plane->pipe == pipe)
+			p->spr = intel_plane->wm;
 
-		p->spr = intel_plane->wm;
-
-		config.sprites_enabled |= p->spr.enabled;
-		config.sprites_scaled |= p->spr.scaled;
+		config->sprites_enabled |= intel_plane->wm.enabled;
+		config->sprites_scaled |= intel_plane->wm.scaled;
 	}
+}
+
+/* Compute new watermarks for the pipe */
+static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
+				  const struct hsw_pipe_wm_parameters *params,
+				  struct intel_pipe_wm *pipe_wm)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int level, max_level = ilk_wm_max_level(dev);
+	/* LP0 watermark maximums depend on this pipe alone */
+	struct intel_wm_config config = {
+		.num_pipes_active = 1,
+		.sprites_enabled = params->spr.enabled,
+		.sprites_scaled = params->spr.scaled,
+	};
+	struct hsw_wm_maximums max;
 
-	ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
+	/* LP0 watermarks always use 1/2 DDB partitioning */
+	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
 
-	/* 5/6 split only in single pipe config on IVB+ */
-	if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
-		ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
-	else
-		*lp_max_5_6 = *lp_max_1_2;
+	for (level = 0; level <= max_level; level++)
+		ilk_compute_wm_level(dev_priv, level, params,
+				     &pipe_wm->wm[level]);
+
+	pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+
+	/* At least LP0 must be valid */
+	return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
 }
 
-static void hsw_compute_wm_results(struct drm_device *dev,
-				   struct hsw_pipe_wm_parameters *params,
-				   struct hsw_wm_maximums *lp_maximums,
-				   struct hsw_wm_values *results)
+/*
+ * Merge the watermarks from all active pipes for a specific level.
+ */
+static void ilk_merge_wm_level(struct drm_device *dev,
+			       int level,
+			       struct intel_wm_level *ret_wm)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	struct intel_wm_level lp_results[4] = {};
-	enum pipe pipe;
-	int level, max_level, wm_lp;
+	const struct intel_crtc *intel_crtc;
 
-	for (level = 1; level <= 4; level++)
-		if (!hsw_compute_lp_wm(dev_priv, level,
-				       lp_maximums, params,
-				       &lp_results[level - 1]))
-			break;
-	max_level = level - 1;
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+		const struct intel_wm_level *wm =
+			&intel_crtc->wm.active.wm[level];
+
+		if (!wm->enable)
+			return;
+
+		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
+		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
+		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
+		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
+	}
+
+	ret_wm->enable = true;
+}
 
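The merge rule above is: a WM1+ level survives only if every active pipe enabled it, and the merged values must cover the worst per-pipe requirement. A standalone sketch with simplified types and made-up values:

    #include <stdio.h>

    #define MAX_OF(a, b) ((a) > (b) ? (a) : (b))

    struct wm_level_demo {
        int enable;
        unsigned int pri_val, spr_val, cur_val, fbc_val;
    };

    /* Same shape as ilk_merge_wm_level(): bail out (leaving the merged
     * level disabled) as soon as one pipe cannot support it. */
    static void merge_level(const struct wm_level_demo *pipes, int n,
                            struct wm_level_demo *out)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (!pipes[i].enable)
                return;
            out->pri_val = MAX_OF(out->pri_val, pipes[i].pri_val);
            out->spr_val = MAX_OF(out->spr_val, pipes[i].spr_val);
            out->cur_val = MAX_OF(out->cur_val, pipes[i].cur_val);
            out->fbc_val = MAX_OF(out->fbc_val, pipes[i].fbc_val);
        }
        out->enable = 1;
    }

    int main(void)
    {
        struct wm_level_demo pipes[2] = {
            { 1, 40, 20, 8, 3 },
            { 1, 64, 16, 6, 5 },
        };
        struct wm_level_demo merged = { 0 };

        merge_level(pipes, 2, &merged);
        printf("merged: enable=%d pri=%u fbc=%u\n",
               merged.enable, merged.pri_val, merged.fbc_val);
        return 0;
    }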
-	memset(results, 0, sizeof(*results));
+/*
+ * Merge all low power watermarks for all active pipes.
+ */
+static void ilk_wm_merge(struct drm_device *dev,
+			 const struct hsw_wm_maximums *max,
+			 struct intel_pipe_wm *merged)
+{
+	int level, max_level = ilk_wm_max_level(dev);
+
+	merged->fbc_wm_enabled = true;
 
-	/* The spec says it is preferred to disable FBC WMs instead of disabling
-	 * a WM level. */
-	results->enable_fbc_wm = true;
+	/* merge each WM1+ level */
 	for (level = 1; level <= max_level; level++) {
-		if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
-			results->enable_fbc_wm = false;
-			lp_results[level - 1].fbc_val = 0;
+		struct intel_wm_level *wm = &merged->wm[level];
+
+		ilk_merge_wm_level(dev, level, wm);
+
+		if (!ilk_validate_wm_level(level, max, wm))
+			break;
+
+		/*
+		 * The spec says it is preferred to disable
+		 * FBC WMs instead of disabling a WM level.
+		 */
+		if (wm->fbc_val > max->fbc) {
+			merged->fbc_wm_enabled = false;
+			wm->fbc_val = 0;
 		}
 	}
+}
 
+static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
+{
+	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
+	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
+}
+
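The LP register slots stop mapping 1:1 onto watermark levels once a fifth level (index 4) exists, as on Haswell: LP1..LP3 then carry levels 1, 3, 4. A small demo of the mapping just added:

    #include <stdio.h>

    static int lp_to_level(int wm_lp, int have_level4)
    {
        /* LP1,LP2,LP3 carry levels 1,2,3 or 1,3,4 */
        return wm_lp + (wm_lp >= 2 && have_level4);
    }

    int main(void)
    {
        int wm_lp;

        for (wm_lp = 1; wm_lp <= 3; wm_lp++)
            printf("LP%d -> level %d (4 levels) / level %d (5 levels)\n",
                   wm_lp, lp_to_level(wm_lp, 0), lp_to_level(wm_lp, 1));
        return 0;
    }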
+static void hsw_compute_wm_results(struct drm_device *dev,
+				   const struct intel_pipe_wm *merged,
+				   enum intel_ddb_partitioning partitioning,
+				   struct hsw_wm_values *results)
+{
+	struct intel_crtc *intel_crtc;
+	int level, wm_lp;
+
+	results->enable_fbc_wm = merged->fbc_wm_enabled;
+	results->partitioning = partitioning;
+
+	/* LP1+ register values */
 	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
 		const struct intel_wm_level *r;
 
-		level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
-		if (level > max_level)
+		level = ilk_wm_lp_to_level(wm_lp, merged);
+
+		r = &merged->wm[level];
+		if (!r->enable)
 			break;
 
-		r = &lp_results[level - 1];
-		results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
-							  r->fbc_val,
-							  r->pri_val,
-							  r->cur_val);
+		results->wm_lp[wm_lp - 1] = WM3_LP_EN |
+			((level * 2) << WM1_LP_LATENCY_SHIFT) |
+			(r->pri_val << WM1_LP_SR_SHIFT) |
+			r->cur_val;
+
+		if (INTEL_INFO(dev)->gen >= 8)
+			results->wm_lp[wm_lp - 1] |=
+				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
+		else
+			results->wm_lp[wm_lp - 1] |=
+				r->fbc_val << WM1_LP_FBC_SHIFT;
+
 		results->wm_lp_spr[wm_lp - 1] = r->spr_val;
 	}
 
-	for_each_pipe(pipe)
-		results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
-							     &params[pipe]);
+	/* LP0 register values */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+		enum pipe pipe = intel_crtc->pipe;
+		const struct intel_wm_level *r =
+			&intel_crtc->wm.active.wm[0];
 
-	for_each_pipe(pipe) {
-		crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-		results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
+		if (WARN_ON(!r->enable))
+			continue;
+
+		results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
+
+		results->wm_pipe[pipe] =
+			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
+			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
+			r->cur_val;
 	}
 }
 
 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
  * case both are at the same level. Prefer r1 in case they're the same. */
-static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
-						  struct hsw_wm_values *r2)
+static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
+						  struct intel_pipe_wm *r1,
+						  struct intel_pipe_wm *r2)
 {
-	int i, val_r1 = 0, val_r2 = 0;
+	int level, max_level = ilk_wm_max_level(dev);
+	int level1 = 0, level2 = 0;
 
-	for (i = 0; i < 3; i++) {
-		if (r1->wm_lp[i] & WM3_LP_EN)
-			val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
-		if (r2->wm_lp[i] & WM3_LP_EN)
-			val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
+	for (level = 1; level <= max_level; level++) {
+		if (r1->wm[level].enable)
+			level1 = level;
+		if (r2->wm[level].enable)
+			level2 = level;
 	}
 
-	if (val_r1 == val_r2) {
-		if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
+	if (level1 == level2) {
+		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
 			return r2;
 		else
 			return r1;
-	} else if (val_r1 > val_r2) {
+	} else if (level1 > level2) {
 		return r1;
 	} else {
 		return r2;
 	}
 }
 
+/* dirty bits used to track which watermarks need changes */
+#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
+#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
+#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
+#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
+#define WM_DIRTY_FBC (1 << 24)
+#define WM_DIRTY_DDB (1 << 25)
+
+static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
+					 const struct hsw_wm_values *old,
+					 const struct hsw_wm_values *new)
+{
+	unsigned int dirty = 0;
+	enum pipe pipe;
+	int wm_lp;
+
+	for_each_pipe(pipe) {
+		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
+			dirty |= WM_DIRTY_LINETIME(pipe);
+			/* Must disable LP1+ watermarks too */
+			dirty |= WM_DIRTY_LP_ALL;
+		}
+
+		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
+			dirty |= WM_DIRTY_PIPE(pipe);
+			/* Must disable LP1+ watermarks too */
+			dirty |= WM_DIRTY_LP_ALL;
+		}
+	}
+
+	if (old->enable_fbc_wm != new->enable_fbc_wm) {
+		dirty |= WM_DIRTY_FBC;
+		/* Must disable LP1+ watermarks too */
+		dirty |= WM_DIRTY_LP_ALL;
+	}
+
+	if (old->partitioning != new->partitioning) {
+		dirty |= WM_DIRTY_DDB;
+		/* Must disable LP1+ watermarks too */
+		dirty |= WM_DIRTY_LP_ALL;
+	}
+
+	/* LP1+ watermarks already deemed dirty, no need to continue */
+	if (dirty & WM_DIRTY_LP_ALL)
+		return dirty;
+
+	/* Find the lowest numbered LP1+ watermark in need of an update... */
+	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
+		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
+			break;
+	}
+
+	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
+	for (; wm_lp <= 3; wm_lp++)
+		dirty |= WM_DIRTY_LP(wm_lp);
+
+	return dirty;
+}
+
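The interesting property of this dirty tracking is the LP1+ cascade: once any LP register needs a rewrite, it and every higher-numbered LP register are flagged, since they must be disabled and rewritten in order. A toy version:

    #include <stdio.h>

    #define WM_DIRTY_LP(lp) (1u << (15 + (lp)))

    int main(void)
    {
        unsigned int old_lp[3] = { 0x100, 0x200, 0x300 };
        unsigned int new_lp[3] = { 0x100, 0x250, 0x300 };
        unsigned int dirty = 0;
        int wm_lp;

        /* first LP register that differs... */
        for (wm_lp = 1; wm_lp <= 3; wm_lp++)
            if (old_lp[wm_lp - 1] != new_lp[wm_lp - 1])
                break;
        /* ...flags itself and everything above it */
        for (; wm_lp <= 3; wm_lp++)
            dirty |= WM_DIRTY_LP(wm_lp);

        printf("dirty = 0x%x (LP2 and LP3 flagged)\n", dirty);
        return 0;
    }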
 /*
  * The spec says we shouldn't write when we don't need, because every write
  * causes WMs to be re-evaluated, expending some power.
  */
 static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
-				struct hsw_wm_values *results,
-				enum intel_ddb_partitioning partitioning)
+				struct hsw_wm_values *results)
 {
-	struct hsw_wm_values previous;
+	struct hsw_wm_values *previous = &dev_priv->wm.hw;
+	unsigned int dirty;
 	uint32_t val;
-	enum intel_ddb_partitioning prev_partitioning;
-	bool prev_enable_fbc_wm;
-
-	previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
-	previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
-	previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
-	previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
-	previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
-	previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
-	previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
-	previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
-	previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
-	previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
-	previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
-	previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
-
-	prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
-				INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-
-	prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
-
-	if (memcmp(results->wm_pipe, previous.wm_pipe,
-		   sizeof(results->wm_pipe)) == 0 &&
-	    memcmp(results->wm_lp, previous.wm_lp,
-		   sizeof(results->wm_lp)) == 0 &&
-	    memcmp(results->wm_lp_spr, previous.wm_lp_spr,
-		   sizeof(results->wm_lp_spr)) == 0 &&
-	    memcmp(results->wm_linetime, previous.wm_linetime,
-		   sizeof(results->wm_linetime)) == 0 &&
-	    partitioning == prev_partitioning &&
-	    results->enable_fbc_wm == prev_enable_fbc_wm)
+
+	dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results);
+	if (!dirty)
 		return;
 
-	if (previous.wm_lp[2] != 0)
+	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0)
 		I915_WRITE(WM3_LP_ILK, 0);
-	if (previous.wm_lp[1] != 0)
+	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
 		I915_WRITE(WM2_LP_ILK, 0);
-	if (previous.wm_lp[0] != 0)
+	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
 		I915_WRITE(WM1_LP_ILK, 0);
 
-	if (previous.wm_pipe[0] != results->wm_pipe[0])
+	if (dirty & WM_DIRTY_PIPE(PIPE_A))
 		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
-	if (previous.wm_pipe[1] != results->wm_pipe[1])
+	if (dirty & WM_DIRTY_PIPE(PIPE_B))
 		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
-	if (previous.wm_pipe[2] != results->wm_pipe[2])
+	if (dirty & WM_DIRTY_PIPE(PIPE_C))
 		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
 
-	if (previous.wm_linetime[0] != results->wm_linetime[0])
+	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
 		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
-	if (previous.wm_linetime[1] != results->wm_linetime[1])
+	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
 		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
-	if (previous.wm_linetime[2] != results->wm_linetime[2])
+	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
 		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
 
-	if (prev_partitioning != partitioning) {
+	if (dirty & WM_DIRTY_DDB) {
 		val = I915_READ(WM_MISC);
-		if (partitioning == INTEL_DDB_PART_1_2)
+		if (results->partitioning == INTEL_DDB_PART_1_2)
 			val &= ~WM_MISC_DATA_PARTITION_5_6;
 		else
 			val |= WM_MISC_DATA_PARTITION_5_6;
 		I915_WRITE(WM_MISC, val);
 	}
 
-	if (prev_enable_fbc_wm != results->enable_fbc_wm) {
+	if (dirty & WM_DIRTY_FBC) {
 		val = I915_READ(DISP_ARB_CTL);
 		if (results->enable_fbc_wm)
 			val &= ~DISP_FBC_WM_DIS;
@@ -2826,45 +2904,65 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
 		I915_WRITE(DISP_ARB_CTL, val);
 	}
 
-	if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
+	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0])
 		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
-	if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
+	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
 		I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
-	if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
+	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
 		I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
 
-	if (results->wm_lp[0] != 0)
+	if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
 		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
-	if (results->wm_lp[1] != 0)
+	if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0)
 		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
-	if (results->wm_lp[2] != 0)
+	if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0)
 		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+
+	dev_priv->wm.hw = *results;
 }
 
-static void haswell_update_wm(struct drm_device *dev)
+static void haswell_update_wm(struct drm_crtc *crtc)
 {
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
-	struct hsw_pipe_wm_parameters params[3];
-	struct hsw_wm_values results_1_2, results_5_6, *best_results;
+	struct hsw_wm_maximums max;
+	struct hsw_pipe_wm_parameters params = {};
+	struct hsw_wm_values results = {};
 	enum intel_ddb_partitioning partitioning;
+	struct intel_pipe_wm pipe_wm = {};
+	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
+	struct intel_wm_config config = {};
 
-	hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6);
+	hsw_compute_wm_parameters(crtc, &params, &config);
+
+	intel_compute_pipe_wm(crtc, &params, &pipe_wm);
+
+	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
+		return;
 
-	hsw_compute_wm_results(dev, params,
-			       &lp_max_1_2, &results_1_2);
-	if (lp_max_1_2.pri != lp_max_5_6.pri) {
-		hsw_compute_wm_results(dev, params,
-				       &lp_max_5_6, &results_5_6);
-		best_results = hsw_find_best_result(&results_1_2, &results_5_6);
+	intel_crtc->wm.active = pipe_wm;
+
+	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+	ilk_wm_merge(dev, &max, &lp_wm_1_2);
+
+	/* 5/6 split only in single pipe config on IVB+ */
+	if (INTEL_INFO(dev)->gen >= 7 &&
+	    config.num_pipes_active == 1 && config.sprites_enabled) {
+		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+		ilk_wm_merge(dev, &max, &lp_wm_5_6);
+
+		best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
 	} else {
-		best_results = &results_1_2;
+		best_lp_wm = &lp_wm_1_2;
 	}
 
-	partitioning = (best_results == &results_1_2) ?
+	partitioning = (best_lp_wm == &lp_wm_1_2) ?
 		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
 
-	hsw_write_wm_values(dev_priv, best_results, partitioning);
+	hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+
+	hsw_write_wm_values(dev_priv, &results);
 }
 
 static void haswell_update_sprite_wm(struct drm_plane *plane,
@@ -2879,7 +2977,7 @@ static void haswell_update_sprite_wm(struct drm_plane *plane,
 	intel_plane->wm.horiz_pixels = sprite_width;
 	intel_plane->wm.bytes_per_pixel = pixel_size;
 
-	haswell_update_wm(plane->dev);
+	haswell_update_wm(crtc);
 }
 
 static bool
@@ -2898,7 +2996,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
 		return false;
 	}
 
-	clock = crtc->mode.clock;
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 
 	/* Use the small buffer method to calculate the sprite watermark */
 	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -2933,7 +3031,7 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	clock = crtc->mode.clock;
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 	if (!clock) {
 		*sprite_wm = 0;
 		return false;
@@ -3044,6 +3142,74 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
 		I915_WRITE(WM3S_LP_IVB, sprite_wm);
 }
 
+static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct hsw_wm_values *hw = &dev_priv->wm.hw;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_pipe_wm *active = &intel_crtc->wm.active;
+	enum pipe pipe = intel_crtc->pipe;
+	static const unsigned int wm0_pipe_reg[] = {
+		[PIPE_A] = WM0_PIPEA_ILK,
+		[PIPE_B] = WM0_PIPEB_ILK,
+		[PIPE_C] = WM0_PIPEC_IVB,
+	};
+
+	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
+	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+
+	if (intel_crtc_active(crtc)) {
+		u32 tmp = hw->wm_pipe[pipe];
+
+		/*
+		 * For active pipes LP0 watermark is marked as
+		 * enabled, and LP1+ watermarks as disabled since
+		 * we can't really reverse compute them in case
+		 * multiple pipes are active.
+		 */
+		active->wm[0].enable = true;
+		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
+		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
+		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
+		active->linetime = hw->wm_linetime[pipe];
+	} else {
+		int level, max_level = ilk_wm_max_level(dev);
+
+		/*
+		 * For inactive pipes, all watermark levels
+		 * should be marked as enabled but zeroed,
+		 * which is what we'd compute them to.
+		 */
+		for (level = 0; level <= max_level; level++)
+			active->wm[level].enable = true;
+	}
+}
+
+void ilk_wm_get_hw_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct hsw_wm_values *hw = &dev_priv->wm.hw;
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		ilk_pipe_wm_get_hw_state(crtc);
+
+	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
+	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
+	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+
+	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+	hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+	hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+
+	hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+		INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+
+	hw->enable_fbc_wm =
+		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+}
+
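Reading WM0 back means unpacking the packed plane/sprite/cursor fields with mask-and-shift, as ilk_pipe_wm_get_hw_state() does above. A demo with made-up shift/mask values; the real ones live in i915_reg.h:

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_PLANE_SHIFT  16
    #define DEMO_PLANE_MASK   (0x7fu << DEMO_PLANE_SHIFT)
    #define DEMO_SPRITE_SHIFT 8
    #define DEMO_SPRITE_MASK  (0x7fu << DEMO_SPRITE_SHIFT)
    #define DEMO_CURSOR_MASK  0x3fu

    int main(void)
    {
        /* pack three fields the way a WM0 register would hold them */
        uint32_t tmp = (40u << DEMO_PLANE_SHIFT) |
                       (12u << DEMO_SPRITE_SHIFT) | 6u;

        printf("pri=%u spr=%u cur=%u\n",
               (tmp & DEMO_PLANE_MASK) >> DEMO_PLANE_SHIFT,
               (tmp & DEMO_SPRITE_MASK) >> DEMO_SPRITE_SHIFT,
               tmp & DEMO_CURSOR_MASK);
        return 0;
    }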
 /**
  * intel_update_watermarks - update FIFO watermark values based on current modes
  *
@@ -3076,12 +3242,12 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
  * We don't use the sprite, so we can ignore that.  And on Crestline we have
  * to set the non-SR watermarks to 8.
  */
-void intel_update_watermarks(struct drm_device *dev)
+void intel_update_watermarks(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 
 	if (dev_priv->display.update_wm)
-		dev_priv->display.update_wm(dev);
+		dev_priv->display.update_wm(crtc);
 }
 
 void intel_update_sprite_watermarks(struct drm_plane *plane,
@@ -3287,6 +3453,98 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
3287 return limits; 3453 return limits;
3288} 3454}
3289 3455
3456static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3457{
3458 int new_power;
3459
3460 new_power = dev_priv->rps.power;
3461 switch (dev_priv->rps.power) {
3462 case LOW_POWER:
3463 if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
3464 new_power = BETWEEN;
3465 break;
3466
3467 case BETWEEN:
3468 if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
3469 new_power = LOW_POWER;
3470 else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
3471 new_power = HIGH_POWER;
3472 break;
3473
3474 case HIGH_POWER:
3475 if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
3476 new_power = BETWEEN;
3477 break;
3478 }
3479 /* Max/min bins are special */
3480 if (val == dev_priv->rps.min_delay)
3481 new_power = LOW_POWER;
3482 if (val == dev_priv->rps.max_delay)
3483 new_power = HIGH_POWER;
3484 if (new_power == dev_priv->rps.power)
3485 return;
3486
3487 /* Note the units here are not exactly 1us, but 1280ns. */
3488 switch (new_power) {
3489 case LOW_POWER:
3490 /* Upclock if more than 95% busy over 16ms */
3491 I915_WRITE(GEN6_RP_UP_EI, 12500);
3492 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
3493
3494 /* Downclock if less than 85% busy over 32ms */
3495 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3496 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3497
3498 I915_WRITE(GEN6_RP_CONTROL,
3499 GEN6_RP_MEDIA_TURBO |
3500 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3501 GEN6_RP_MEDIA_IS_GFX |
3502 GEN6_RP_ENABLE |
3503 GEN6_RP_UP_BUSY_AVG |
3504 GEN6_RP_DOWN_IDLE_AVG);
3505 break;
3506
3507 case BETWEEN:
3508 /* Upclock if more than 90% busy over 13ms */
3509 I915_WRITE(GEN6_RP_UP_EI, 10250);
3510 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3511
3512 /* Downclock if less than 75% busy over 32ms */
3513 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3514 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3515
3516 I915_WRITE(GEN6_RP_CONTROL,
3517 GEN6_RP_MEDIA_TURBO |
3518 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3519 GEN6_RP_MEDIA_IS_GFX |
3520 GEN6_RP_ENABLE |
3521 GEN6_RP_UP_BUSY_AVG |
3522 GEN6_RP_DOWN_IDLE_AVG);
3523 break;
3524
3525 case HIGH_POWER:
3526 /* Upclock if more than 85% busy over 10ms */
3527 I915_WRITE(GEN6_RP_UP_EI, 8000);
3528 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3529
3530 /* Downclock if less than 60% busy over 32ms */
3531 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3532 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3533
3534 I915_WRITE(GEN6_RP_CONTROL,
3535 GEN6_RP_MEDIA_TURBO |
3536 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3537 GEN6_RP_MEDIA_IS_GFX |
3538 GEN6_RP_ENABLE |
3539 GEN6_RP_UP_BUSY_AVG |
3540 GEN6_RP_DOWN_IDLE_AVG);
3541 break;
3542 }
3543
3544 dev_priv->rps.power = new_power;
3545 dev_priv->rps.last_adj = 0;
3546}
3547
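The threshold programming above is easier to follow with the units spelled out: the RP_UP/DOWN_EI and *_THRESHOLD registers count 1280ns ticks, so an up-threshold is simply the evaluation interval scaled by the target busy percentage. A minimal stand-alone sketch of that arithmetic (the helper name is illustrative, not driver API):

    #include <stdio.h>

    /* one register tick is 1280ns, so 1ms = 1000000ns / 1280 = 781.25 ticks */
    static unsigned int ms_to_ticks(unsigned int ms)
    {
        return ms * 1000000 / 1280;
    }

    int main(void)
    {
        unsigned int ei = ms_to_ticks(16);   /* 12500, the LOW_POWER up EI   */
        unsigned int up = ei * 95 / 100;     /* 11875; the code rounds to 11800 */

        printf("EI=%u ticks, 95%% threshold=%u ticks\n", ei, up);
        return 0;
    }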
3290void gen6_set_rps(struct drm_device *dev, u8 val) 3548void gen6_set_rps(struct drm_device *dev, u8 val)
3291{ 3549{
3292 struct drm_i915_private *dev_priv = dev->dev_private; 3550 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3299,6 +3557,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3299 if (val == dev_priv->rps.cur_delay) 3557 if (val == dev_priv->rps.cur_delay)
3300 return; 3558 return;
3301 3559
3560 gen6_set_rps_thresholds(dev_priv, val);
3561
3302 if (IS_HASWELL(dev)) 3562 if (IS_HASWELL(dev))
3303 I915_WRITE(GEN6_RPNSWREQ, 3563 I915_WRITE(GEN6_RPNSWREQ,
3304 HSW_FREQUENCY(val)); 3564 HSW_FREQUENCY(val));
@@ -3320,6 +3580,32 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3320 trace_intel_gpu_freq_change(val * 50); 3580 trace_intel_gpu_freq_change(val * 50);
3321} 3581}
3322 3582
3583void gen6_rps_idle(struct drm_i915_private *dev_priv)
3584{
3585 mutex_lock(&dev_priv->rps.hw_lock);
3586 if (dev_priv->rps.enabled) {
3587 if (dev_priv->info->is_valleyview)
3588 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3589 else
3590 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3591 dev_priv->rps.last_adj = 0;
3592 }
3593 mutex_unlock(&dev_priv->rps.hw_lock);
3594}
3595
3596void gen6_rps_boost(struct drm_i915_private *dev_priv)
3597{
3598 mutex_lock(&dev_priv->rps.hw_lock);
3599 if (dev_priv->rps.enabled) {
3600 if (dev_priv->info->is_valleyview)
3601 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3602 else
3603 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3604 dev_priv->rps.last_adj = 0;
3605 }
3606 mutex_unlock(&dev_priv->rps.hw_lock);
3607}
3608
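gen6_rps_idle() and gen6_rps_boost() bracket the adaptive tuning: going idle drops straight to the minimum ratio, a boost jumps straight to the maximum, and both zero last_adj so the next up/down decision starts from a clean slate. A stand-alone model of that policy (hypothetical types; the hw_lock and the Valleyview split are elided):

    #include <stdbool.h>

    struct rps_model {
        bool enabled;
        int cur, min, max;
        int last_adj;
    };

    static void rps_idle(struct rps_model *rps)
    {
        if (rps->enabled) {
            rps->cur = rps->min;    /* lowest frequency/voltage while idle */
            rps->last_adj = 0;
        }
    }

    static void rps_boost(struct rps_model *rps)
    {
        if (rps->enabled) {
            rps->cur = rps->max;    /* ramp to max when a client is waiting */
            rps->last_adj = 0;
        }
    }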
3323/* 3609/*
3324 * Wait until the previous freq change has completed, 3610 * Wait until the previous freq change has completed,
3325 * or the timeout elapsed, and then update our notion 3611 * or the timeout elapsed, and then update our notion
@@ -3415,6 +3701,20 @@ static void valleyview_disable_rps(struct drm_device *dev)
3415 } 3701 }
3416} 3702}
3417 3703
3704static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3705{
3706 if (IS_GEN6(dev))
3707 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3708
3709 if (IS_HASWELL(dev))
3710 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3711
3712 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3713 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3714 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3715 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3716}
3717
3418int intel_enable_rc6(const struct drm_device *dev) 3718int intel_enable_rc6(const struct drm_device *dev)
3419{ 3719{
3420 /* No RC6 before Ironlake */ 3720 /* No RC6 before Ironlake */
@@ -3429,18 +3729,13 @@ int intel_enable_rc6(const struct drm_device *dev)
3429 if (INTEL_INFO(dev)->gen == 5) 3729 if (INTEL_INFO(dev)->gen == 5)
3430 return 0; 3730 return 0;
3431 3731
3432 if (IS_HASWELL(dev)) { 3732 if (IS_HASWELL(dev))
3433 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3434 return INTEL_RC6_ENABLE; 3733 return INTEL_RC6_ENABLE;
3435 }
3436 3734
3437 /* snb/ivb have more than one rc6 state. */ 3735 /* snb/ivb have more than one rc6 state. */
3438 if (INTEL_INFO(dev)->gen == 6) { 3736 if (INTEL_INFO(dev)->gen == 6)
3439 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3440 return INTEL_RC6_ENABLE; 3737 return INTEL_RC6_ENABLE;
3441 }
3442 3738
3443 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
3444 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 3739 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3445} 3740}
3446 3741
@@ -3467,6 +3762,78 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
3467 I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs); 3762 I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
3468} 3763}
3469 3764
3765static void gen8_enable_rps(struct drm_device *dev)
3766{
3767 struct drm_i915_private *dev_priv = dev->dev_private;
3768 struct intel_ring_buffer *ring;
3769 uint32_t rc6_mask = 0, rp_state_cap;
3770 int unused;
3771
3772 /* 1a: Software RC state - RC0 */
3773 I915_WRITE(GEN6_RC_STATE, 0);
3774
3775 /* 1c & 1d: Get forcewake during program sequence. Although the driver
3776 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
3777 gen6_gt_force_wake_get(dev_priv);
3778
3779 /* 2a: Disable RC states. */
3780 I915_WRITE(GEN6_RC_CONTROL, 0);
3781
3782 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3783
3784 /* 2b: Program RC6 thresholds. */
3785 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3786 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 125000 * 1280ns */
3787 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
3788 for_each_ring(ring, dev_priv, unused)
3789 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3790 I915_WRITE(GEN6_RC_SLEEP, 0);
3791 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3792
3793 /* 3: Enable RC6 */
3794 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3795 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3796 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
3797 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3798 GEN6_RC_CTL_EI_MODE(1) |
3799 rc6_mask);
3800
3801 /* 4: Program defaults and thresholds for RPS */
3802 I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
3803 I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
3804 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
3805 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
3806
3807 /* Docs recommend 900MHz, and 300 MHz respectively */
3808 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3809 dev_priv->rps.max_delay << 24 |
3810 dev_priv->rps.min_delay << 16);
3811
3812 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
3813 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
3814 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
3815 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
3816
3817 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3818
3819 /* 5: Enable RPS */
3820 I915_WRITE(GEN6_RP_CONTROL,
3821 GEN6_RP_MEDIA_TURBO |
3822 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3823 GEN6_RP_MEDIA_IS_GFX |
3824 GEN6_RP_ENABLE |
3825 GEN6_RP_UP_BUSY_AVG |
3826 GEN6_RP_DOWN_IDLE_AVG);
3827
3828 /* 6: Ring frequency + overclocking (our driver does this later) */
3829
3830 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3831
3832 gen6_enable_rps_interrupts(dev);
3833
3834 gen6_gt_force_wake_put(dev_priv);
3835}
3836
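The gen8 values above follow the same 1.28us tick convention; the constants are times in 10ns units, and dividing by 128 converts them to 1280ns ticks. A quick stand-alone check of the comments (illustrative, not driver code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int timeout = 100000000 / 128;   /* 781250 ticks */
        unsigned int up      = 7600000 / 128;     /* 59375 ticks  */

        /* 781250 * 1280ns = 1.0s; 59375 ticks over the 66000-tick EI ~ 90% */
        printf("timeout = %u ms\n", timeout * 1280u / 1000000u);
        printf("up threshold = %u%% of EI\n", up * 100u / 66000u);
        return 0;
    }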
3470static void gen6_enable_rps(struct drm_device *dev) 3837static void gen6_enable_rps(struct drm_device *dev)
3471{ 3838{
3472 struct drm_i915_private *dev_priv = dev->dev_private; 3839 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3501,7 +3868,10 @@ static void gen6_enable_rps(struct drm_device *dev)
3501 3868
3502 /* In units of 50MHz */ 3869 /* In units of 50MHz */
3503 dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff; 3870 dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
3504 dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16; 3871 dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
3872 dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
3873 dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
3874 dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
3505 dev_priv->rps.cur_delay = 0; 3875 dev_priv->rps.cur_delay = 0;
3506 3876
3507 /* disable the counters and set deterministic thresholds */ 3877 /* disable the counters and set deterministic thresholds */
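RP_STATE_CAP packs three ratios, all in 50MHz units: bits 7:0 hold RP0 (the non-overclocked maximum, assigned to both hw_max and rp0_delay above), bits 15:8 hold RP1 (treated as the efficient point, RPe), and bits 23:16 hold RPn (the minimum). A stand-alone decoder mirroring the shifts above:

    #include <stdint.h>

    struct rps_caps {
        uint8_t rp0;    /* max ratio, bits 7:0                     */
        uint8_t rp1;    /* efficient ratio, bits 15:8, used as RPe */
        uint8_t rpn;    /* min ratio, bits 23:16                   */
    };

    static struct rps_caps decode_rp_state_cap(uint32_t cap)
    {
        struct rps_caps c = {
            .rp0 = (cap >>  0) & 0xff,
            .rp1 = (cap >>  8) & 0xff,
            .rpn = (cap >> 16) & 0xff,
        };
        return c;
    }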
@@ -3539,48 +3909,16 @@ static void gen6_enable_rps(struct drm_device *dev)
3539 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 3909 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3540 } 3910 }
3541 3911
3542 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", 3912 intel_print_rc6_info(dev, rc6_mask);
3543 (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3544 (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3545 (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3546 3913
3547 I915_WRITE(GEN6_RC_CONTROL, 3914 I915_WRITE(GEN6_RC_CONTROL,
3548 rc6_mask | 3915 rc6_mask |
3549 GEN6_RC_CTL_EI_MODE(1) | 3916 GEN6_RC_CTL_EI_MODE(1) |
3550 GEN6_RC_CTL_HW_ENABLE); 3917 GEN6_RC_CTL_HW_ENABLE);
3551 3918
3552 if (IS_HASWELL(dev)) { 3919 /* Power down if completely idle for over 50ms */
3553 I915_WRITE(GEN6_RPNSWREQ, 3920 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3554 HSW_FREQUENCY(10));
3555 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3556 HSW_FREQUENCY(12));
3557 } else {
3558 I915_WRITE(GEN6_RPNSWREQ,
3559 GEN6_FREQUENCY(10) |
3560 GEN6_OFFSET(0) |
3561 GEN6_AGGRESSIVE_TURBO);
3562 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3563 GEN6_FREQUENCY(12));
3564 }
3565
3566 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
3567 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3568 dev_priv->rps.max_delay << 24 |
3569 dev_priv->rps.min_delay << 16);
3570
3571 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3572 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3573 I915_WRITE(GEN6_RP_UP_EI, 66000);
3574 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3575
3576 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 3921 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3577 I915_WRITE(GEN6_RP_CONTROL,
3578 GEN6_RP_MEDIA_TURBO |
3579 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3580 GEN6_RP_MEDIA_IS_GFX |
3581 GEN6_RP_ENABLE |
3582 GEN6_RP_UP_BUSY_AVG |
3583 (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
3584 3922
3585 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); 3923 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3586 if (!ret) { 3924 if (!ret) {
@@ -3596,7 +3934,8 @@ static void gen6_enable_rps(struct drm_device *dev)
3596 DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); 3934 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3597 } 3935 }
3598 3936
3599 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); 3937 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3938 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3600 3939
3601 gen6_enable_rps_interrupts(dev); 3940 gen6_enable_rps_interrupts(dev);
3602 3941
@@ -3624,23 +3963,28 @@ void gen6_update_ring_freq(struct drm_device *dev)
3624 unsigned int gpu_freq; 3963 unsigned int gpu_freq;
3625 unsigned int max_ia_freq, min_ring_freq; 3964 unsigned int max_ia_freq, min_ring_freq;
3626 int scaling_factor = 180; 3965 int scaling_factor = 180;
3966 struct cpufreq_policy *policy;
3627 3967
3628 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3968 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3629 3969
3630 max_ia_freq = cpufreq_quick_get_max(0); 3970 policy = cpufreq_cpu_get(0);
3631 /* 3971 if (policy) {
3632 * Default to measured freq if none found, PCU will ensure we don't go 3972 max_ia_freq = policy->cpuinfo.max_freq;
3633 * over 3973 cpufreq_cpu_put(policy);
3634 */ 3974 } else {
3635 if (!max_ia_freq) 3975 /*
3976 * Default to measured freq if none found, PCU will ensure we
3977 * don't go over
3978 */
3636 max_ia_freq = tsc_khz; 3979 max_ia_freq = tsc_khz;
3980 }
3637 3981
3638 /* Convert from kHz to MHz */ 3982 /* Convert from kHz to MHz */
3639 max_ia_freq /= 1000; 3983 max_ia_freq /= 1000;
3640 3984
3641 min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK); 3985 min_ring_freq = I915_READ(DCLK) & 0xf;
3642 /* convert DDR frequency from units of 133.3MHz to bandwidth */ 3986 /* convert DDR frequency from units of 266.6MHz to bandwidth */
3643 min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3; 3987 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3644 3988
3645 /* 3989 /*
3646 * For each potential GPU frequency, load a ring frequency we'd like 3990 * For each potential GPU frequency, load a ring frequency we'd like
@@ -3652,8 +3996,11 @@ void gen6_update_ring_freq(struct drm_device *dev)
3652 int diff = dev_priv->rps.max_delay - gpu_freq; 3996 int diff = dev_priv->rps.max_delay - gpu_freq;
3653 unsigned int ia_freq = 0, ring_freq = 0; 3997 unsigned int ia_freq = 0, ring_freq = 0;
3654 3998
3655 if (IS_HASWELL(dev)) { 3999 if (INTEL_INFO(dev)->gen >= 8) {
3656 ring_freq = (gpu_freq * 5 + 3) / 4; 4000 /* max(2 * GT, DDR). NB: GT is 50MHz units */
4001 ring_freq = max(min_ring_freq, gpu_freq);
4002 } else if (IS_HASWELL(dev)) {
4003 ring_freq = mult_frac(gpu_freq, 5, 4);
3657 ring_freq = max(min_ring_freq, ring_freq); 4004 ring_freq = max(min_ring_freq, ring_freq);
3658 /* leave ia_freq as the default, chosen by cpufreq */ 4005 /* leave ia_freq as the default, chosen by cpufreq */
3659 } else { 4006 } else {
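The table above keys the ring clock to the GPU clock: Haswell runs the ring 25% above GT (mult_frac(gpu_freq, 5, 4) is the overflow-safe form of gpu_freq * 5 / 4), gen8 just takes GT itself, and both are clamped to the DDR-derived floor. A stand-alone sketch of the selection (illustrative helper, 50MHz units throughout):

    static unsigned int pick_ring_freq(int gen, unsigned int gpu_freq,
                                       unsigned int min_ring_freq)
    {
        unsigned int ring_freq;

        if (gen >= 8)
            ring_freq = gpu_freq;            /* BDW: track GT...          */
        else
            ring_freq = gpu_freq * 5 / 4;    /* HSW: 25% above GT         */

        if (ring_freq < min_ring_freq)
            ring_freq = min_ring_freq;       /* ...never below DDR floor  */
        return ring_freq;
    }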
@@ -3709,24 +4056,6 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3709 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; 4056 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3710} 4057}
3711 4058
3712static void vlv_rps_timer_work(struct work_struct *work)
3713{
3714 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
3715 rps.vlv_work.work);
3716
3717 /*
3718 * Timer fired, we must be idle. Drop to min voltage state.
3719 * Note: we use RPe here since it should match the
3720 * Vmin we were shooting for. That should give us better
3721 * perf when we come back out of RC6 than if we used the
3722 * min freq available.
3723 */
3724 mutex_lock(&dev_priv->rps.hw_lock);
3725 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
3726 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3727 mutex_unlock(&dev_priv->rps.hw_lock);
3728}
3729
3730static void valleyview_setup_pctx(struct drm_device *dev) 4059static void valleyview_setup_pctx(struct drm_device *dev)
3731{ 4060{
3732 struct drm_i915_private *dev_priv = dev->dev_private; 4061 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3773,13 +4102,14 @@ static void valleyview_enable_rps(struct drm_device *dev)
3773{ 4102{
3774 struct drm_i915_private *dev_priv = dev->dev_private; 4103 struct drm_i915_private *dev_priv = dev->dev_private;
3775 struct intel_ring_buffer *ring; 4104 struct intel_ring_buffer *ring;
3776 u32 gtfifodbg, val; 4105 u32 gtfifodbg, val, rc6_mode = 0;
3777 int i; 4106 int i;
3778 4107
3779 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4108 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3780 4109
3781 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 4110 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3782 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); 4111 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4112 gtfifodbg);
3783 I915_WRITE(GTFIFODBG, gtfifodbg); 4113 I915_WRITE(GTFIFODBG, gtfifodbg);
3784 } 4114 }
3785 4115
@@ -3812,9 +4142,16 @@ static void valleyview_enable_rps(struct drm_device *dev)
3812 I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350); 4142 I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
3813 4143
3814 /* allows RC6 residency counter to work */ 4144 /* allows RC6 residency counter to work */
3815 I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3)); 4145 I915_WRITE(VLV_COUNTER_CONTROL,
3816 I915_WRITE(GEN6_RC_CONTROL, 4146 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
3817 GEN7_RC_CTL_TO_MODE); 4147 VLV_MEDIA_RC6_COUNT_EN |
4148 VLV_RENDER_RC6_COUNT_EN));
4149 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4150 rc6_mode = GEN7_RC_CTL_TO_MODE;
4151
4152 intel_print_rc6_info(dev, rc6_mode);
4153
4154 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
3818 4155
3819 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 4156 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3820 switch ((val >> 6) & 3) { 4157 switch ((val >> 6) & 3) {
@@ -3985,6 +4322,8 @@ static void ironlake_enable_rc6(struct drm_device *dev)
3985 4322
3986 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN); 4323 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
3987 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 4324 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4325
4326 intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
3988} 4327}
3989 4328
3990static unsigned long intel_pxfreq(u32 vidfreq) 4329static unsigned long intel_pxfreq(u32 vidfreq)
@@ -4603,13 +4942,12 @@ void intel_disable_gt_powersave(struct drm_device *dev)
4603 } else if (INTEL_INFO(dev)->gen >= 6) { 4942 } else if (INTEL_INFO(dev)->gen >= 6) {
4604 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); 4943 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
4605 cancel_work_sync(&dev_priv->rps.work); 4944 cancel_work_sync(&dev_priv->rps.work);
4606 if (IS_VALLEYVIEW(dev))
4607 cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
4608 mutex_lock(&dev_priv->rps.hw_lock); 4945 mutex_lock(&dev_priv->rps.hw_lock);
4609 if (IS_VALLEYVIEW(dev)) 4946 if (IS_VALLEYVIEW(dev))
4610 valleyview_disable_rps(dev); 4947 valleyview_disable_rps(dev);
4611 else 4948 else
4612 gen6_disable_rps(dev); 4949 gen6_disable_rps(dev);
4950 dev_priv->rps.enabled = false;
4613 mutex_unlock(&dev_priv->rps.hw_lock); 4951 mutex_unlock(&dev_priv->rps.hw_lock);
4614 } 4952 }
4615} 4953}
@@ -4625,10 +4963,14 @@ static void intel_gen6_powersave_work(struct work_struct *work)
4625 4963
4626 if (IS_VALLEYVIEW(dev)) { 4964 if (IS_VALLEYVIEW(dev)) {
4627 valleyview_enable_rps(dev); 4965 valleyview_enable_rps(dev);
4966 } else if (IS_BROADWELL(dev)) {
4967 gen8_enable_rps(dev);
4968 gen6_update_ring_freq(dev);
4628 } else { 4969 } else {
4629 gen6_enable_rps(dev); 4970 gen6_enable_rps(dev);
4630 gen6_update_ring_freq(dev); 4971 gen6_update_ring_freq(dev);
4631 } 4972 }
4973 dev_priv->rps.enabled = true;
4632 mutex_unlock(&dev_priv->rps.hw_lock); 4974 mutex_unlock(&dev_priv->rps.hw_lock);
4633} 4975}
4634 4976
@@ -4672,7 +5014,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
4672 I915_WRITE(DSPCNTR(pipe), 5014 I915_WRITE(DSPCNTR(pipe),
4673 I915_READ(DSPCNTR(pipe)) | 5015 I915_READ(DSPCNTR(pipe)) |
4674 DISPPLANE_TRICKLE_FEED_DISABLE); 5016 DISPPLANE_TRICKLE_FEED_DISABLE);
4675 intel_flush_display_plane(dev_priv, pipe); 5017 intel_flush_primary_plane(dev_priv, pipe);
4676 } 5018 }
4677} 5019}
4678 5020
@@ -4932,6 +5274,50 @@ static void lpt_suspend_hw(struct drm_device *dev)
4932 } 5274 }
4933} 5275}
4934 5276
5277static void gen8_init_clock_gating(struct drm_device *dev)
5278{
5279 struct drm_i915_private *dev_priv = dev->dev_private;
5280 enum pipe i;
5281
5282 I915_WRITE(WM3_LP_ILK, 0);
5283 I915_WRITE(WM2_LP_ILK, 0);
5284 I915_WRITE(WM1_LP_ILK, 0);
5285
5286 /* FIXME(BDW): Check all the w/a, some might only apply to
5287 * pre-production hw. */
5288
5289 WARN(!i915_preliminary_hw_support,
5290 "GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n");
5291 I915_WRITE(HALF_SLICE_CHICKEN3,
5292 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
5293 I915_WRITE(HALF_SLICE_CHICKEN3,
5294 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
5295 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
5296
5297 I915_WRITE(_3D_CHICKEN3,
5298 _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
5299
5300 I915_WRITE(COMMON_SLICE_CHICKEN2,
5301 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
5302
5303 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5304 _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
5305
5306 /* WaSwitchSolVfFArbitrationPriority */
5307 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5308
5309 /* WaPsrDPAMaskVBlankInSRD */
5310 I915_WRITE(CHICKEN_PAR1_1,
5311 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
5312
5313 /* WaPsrDPRSUnmaskVBlankInSRD */
5314 for_each_pipe(i) {
5315 I915_WRITE(CHICKEN_PIPESL_1(i),
5316 I915_READ(CHICKEN_PIPESL_1(i)) |
5317 DPRS_MASK_VBLANK_SRD);
5318 }
5319}
5320
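Nearly every write in this function goes through _MASKED_BIT_ENABLE(). These chicken registers are "masked": the top 16 bits of a write select which of the bottom 16 bits actually change, so no read-modify-write cycle is needed. The i915 helpers expand as below (shown stand-alone):

    #define _MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))  /* unmask bit(s) a, write 1 */
    #define _MASKED_BIT_DISABLE(a) ((a) << 16)          /* unmask bit(s) a, write 0 */

    /* e.g. _MASKED_BIT_ENABLE(0x4) == 0x00040004: only bit 2 is touched */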
4935static void haswell_init_clock_gating(struct drm_device *dev) 5321static void haswell_init_clock_gating(struct drm_device *dev)
4936{ 5322{
4937 struct drm_i915_private *dev_priv = dev->dev_private; 5323 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5255,6 +5641,25 @@ void intel_suspend_hw(struct drm_device *dev)
5255 lpt_suspend_hw(dev); 5641 lpt_suspend_hw(dev);
5256} 5642}
5257 5643
5644static bool is_always_on_power_domain(struct drm_device *dev,
5645 enum intel_display_power_domain domain)
5646{
5647 unsigned long always_on_domains;
5648
5649 BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
5650
5651 if (IS_BROADWELL(dev)) {
5652 always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS;
5653 } else if (IS_HASWELL(dev)) {
5654 always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
5655 } else {
5656 WARN_ON(1);
5657 return true;
5658 }
5659
5660 return BIT(domain) & always_on_domains;
5661}
5662
5258/** 5663/**
5259 * We should only use the power well if we explicitly asked the hardware to 5664 * We should only use the power well if we explicitly asked the hardware to
5260 * enable it, so check if it's enabled and also check if we've requested it to 5665 * enable it, so check if it's enabled and also check if we've requested it to
@@ -5268,23 +5673,11 @@ bool intel_display_power_enabled(struct drm_device *dev,
5268 if (!HAS_POWER_WELL(dev)) 5673 if (!HAS_POWER_WELL(dev))
5269 return true; 5674 return true;
5270 5675
5271 switch (domain) { 5676 if (is_always_on_power_domain(dev, domain))
5272 case POWER_DOMAIN_PIPE_A:
5273 case POWER_DOMAIN_TRANSCODER_EDP:
5274 return true; 5677 return true;
5275 case POWER_DOMAIN_PIPE_B: 5678
5276 case POWER_DOMAIN_PIPE_C: 5679 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5277 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
5278 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
5279 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
5280 case POWER_DOMAIN_TRANSCODER_A:
5281 case POWER_DOMAIN_TRANSCODER_B:
5282 case POWER_DOMAIN_TRANSCODER_C:
5283 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5284 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); 5680 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5285 default:
5286 BUG();
5287 }
5288} 5681}
5289 5682
5290static void __intel_set_power_well(struct drm_device *dev, bool enable) 5683static void __intel_set_power_well(struct drm_device *dev, bool enable)
@@ -5328,83 +5721,136 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5328 spin_lock_irqsave(&dev->vbl_lock, irqflags); 5721 spin_lock_irqsave(&dev->vbl_lock, irqflags);
5329 for_each_pipe(p) 5722 for_each_pipe(p)
5330 if (p != PIPE_A) 5723 if (p != PIPE_A)
5331 dev->last_vblank[p] = 0; 5724 dev->vblank[p].last = 0;
5332 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 5725 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
5333 } 5726 }
5334 } 5727 }
5335} 5728}
5336 5729
5337static struct i915_power_well *hsw_pwr; 5730static void __intel_power_well_get(struct drm_device *dev,
5731 struct i915_power_well *power_well)
5732{
5733 if (!power_well->count++)
5734 __intel_set_power_well(dev, true);
5735}
5736
5737static void __intel_power_well_put(struct drm_device *dev,
5738 struct i915_power_well *power_well)
5739{
5740 WARN_ON(!power_well->count);
5741 if (!--power_well->count && i915_disable_power_well)
5742 __intel_set_power_well(dev, false);
5743}
5744
5745void intel_display_power_get(struct drm_device *dev,
5746 enum intel_display_power_domain domain)
5747{
5748 struct drm_i915_private *dev_priv = dev->dev_private;
5749 struct i915_power_domains *power_domains;
5750
5751 if (!HAS_POWER_WELL(dev))
5752 return;
5753
5754 if (is_always_on_power_domain(dev, domain))
5755 return;
5756
5757 power_domains = &dev_priv->power_domains;
5758
5759 mutex_lock(&power_domains->lock);
5760 __intel_power_well_get(dev, &power_domains->power_wells[0]);
5761 mutex_unlock(&power_domains->lock);
5762}
5763
5764void intel_display_power_put(struct drm_device *dev,
5765 enum intel_display_power_domain domain)
5766{
5767 struct drm_i915_private *dev_priv = dev->dev_private;
5768 struct i915_power_domains *power_domains;
5769
5770 if (!HAS_POWER_WELL(dev))
5771 return;
5772
5773 if (is_always_on_power_domain(dev, domain))
5774 return;
5775
5776 power_domains = &dev_priv->power_domains;
5777
5778 mutex_lock(&power_domains->lock);
5779 __intel_power_well_put(dev, &power_domains->power_wells[0]);
5780 mutex_unlock(&power_domains->lock);
5781}
5782
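The get/put pair above replaces the old request flag with plain reference counting under power_domains->lock: the first getter powers the well up, the last putter powers it down (unless i915_disable_power_well vetoes the off transition). A stand-alone model of the counting, with the lock and the hardware write elided:

    struct well_model {
        int count;
        int hw_on;
    };

    static void well_get(struct well_model *w)
    {
        if (!w->count++)        /* 0 -> 1: first user, power up   */
            w->hw_on = 1;
    }

    static void well_put(struct well_model *w)
    {
        if (!--w->count)        /* 1 -> 0: last user, power down  */
            w->hw_on = 0;
    }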
5783static struct i915_power_domains *hsw_pwr;
5338 5784
5339/* Display audio driver power well request */ 5785/* Display audio driver power well request */
5340void i915_request_power_well(void) 5786void i915_request_power_well(void)
5341{ 5787{
5788 struct drm_i915_private *dev_priv;
5789
5342 if (WARN_ON(!hsw_pwr)) 5790 if (WARN_ON(!hsw_pwr))
5343 return; 5791 return;
5344 5792
5345 spin_lock_irq(&hsw_pwr->lock); 5793 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5346 if (!hsw_pwr->count++ && 5794 power_domains);
5347 !hsw_pwr->i915_request) 5795
5348 __intel_set_power_well(hsw_pwr->device, true); 5796 mutex_lock(&hsw_pwr->lock);
5349 spin_unlock_irq(&hsw_pwr->lock); 5797 __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
5798 mutex_unlock(&hsw_pwr->lock);
5350} 5799}
5351EXPORT_SYMBOL_GPL(i915_request_power_well); 5800EXPORT_SYMBOL_GPL(i915_request_power_well);
5352 5801
5353/* Display audio driver power well release */ 5802/* Display audio driver power well release */
5354void i915_release_power_well(void) 5803void i915_release_power_well(void)
5355{ 5804{
5805 struct drm_i915_private *dev_priv;
5806
5356 if (WARN_ON(!hsw_pwr)) 5807 if (WARN_ON(!hsw_pwr))
5357 return; 5808 return;
5358 5809
5359 spin_lock_irq(&hsw_pwr->lock); 5810 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5360 WARN_ON(!hsw_pwr->count); 5811 power_domains);
5361 if (!--hsw_pwr->count && 5812
5362 !hsw_pwr->i915_request) 5813 mutex_lock(&hsw_pwr->lock);
5363 __intel_set_power_well(hsw_pwr->device, false); 5814 __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
5364 spin_unlock_irq(&hsw_pwr->lock); 5815 mutex_unlock(&hsw_pwr->lock);
5365} 5816}
5366EXPORT_SYMBOL_GPL(i915_release_power_well); 5817EXPORT_SYMBOL_GPL(i915_release_power_well);
5367 5818
5368int i915_init_power_well(struct drm_device *dev) 5819int intel_power_domains_init(struct drm_device *dev)
5369{ 5820{
5370 struct drm_i915_private *dev_priv = dev->dev_private; 5821 struct drm_i915_private *dev_priv = dev->dev_private;
5822 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5823 struct i915_power_well *power_well;
5371 5824
5372 hsw_pwr = &dev_priv->power_well; 5825 mutex_init(&power_domains->lock);
5826 hsw_pwr = power_domains;
5373 5827
5374 hsw_pwr->device = dev; 5828 power_well = &power_domains->power_wells[0];
5375 spin_lock_init(&hsw_pwr->lock); 5829 power_well->count = 0;
5376 hsw_pwr->count = 0;
5377 5830
5378 return 0; 5831 return 0;
5379} 5832}
5380 5833
5381void i915_remove_power_well(struct drm_device *dev) 5834void intel_power_domains_remove(struct drm_device *dev)
5382{ 5835{
5383 hsw_pwr = NULL; 5836 hsw_pwr = NULL;
5384} 5837}
5385 5838
5386void intel_set_power_well(struct drm_device *dev, bool enable) 5839static void intel_power_domains_resume(struct drm_device *dev)
5387{ 5840{
5388 struct drm_i915_private *dev_priv = dev->dev_private; 5841 struct drm_i915_private *dev_priv = dev->dev_private;
5389 struct i915_power_well *power_well = &dev_priv->power_well; 5842 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5843 struct i915_power_well *power_well;
5390 5844
5391 if (!HAS_POWER_WELL(dev)) 5845 if (!HAS_POWER_WELL(dev))
5392 return; 5846 return;
5393 5847
5394 if (!i915_disable_power_well && !enable) 5848 mutex_lock(&power_domains->lock);
5395 return;
5396 5849
5397 spin_lock_irq(&power_well->lock); 5850 power_well = &power_domains->power_wells[0];
5398 power_well->i915_request = enable; 5851 __intel_set_power_well(dev, power_well->count > 0);
5399 5852
5400 /* only reject "disable" power well request */ 5853 mutex_unlock(&power_domains->lock);
5401 if (power_well->count && !enable) {
5402 spin_unlock_irq(&power_well->lock);
5403 return;
5404 }
5405
5406 __intel_set_power_well(dev, enable);
5407 spin_unlock_irq(&power_well->lock);
5408} 5854}
5409 5855
5410/* 5856/*
@@ -5413,7 +5859,7 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
5413 * to be enabled, and it will only be disabled if none of the registers is 5859 * to be enabled, and it will only be disabled if none of the registers is
5414 * requesting it to be enabled. 5860 * requesting it to be enabled.
5415 */ 5861 */
5416void intel_init_power_well(struct drm_device *dev) 5862void intel_power_domains_init_hw(struct drm_device *dev)
5417{ 5863{
5418 struct drm_i915_private *dev_priv = dev->dev_private; 5864 struct drm_i915_private *dev_priv = dev->dev_private;
5419 5865
@@ -5421,7 +5867,8 @@ void intel_init_power_well(struct drm_device *dev)
5421 return; 5867 return;
5422 5868
5423 /* For now, we need the power well to be always enabled. */ 5869 /* For now, we need the power well to be always enabled. */
5424 intel_set_power_well(dev, true); 5870 intel_display_set_init_power(dev, true);
5871 intel_power_domains_resume(dev);
5425 5872
5426 /* We're taking over the BIOS, so clear any requests made by it since 5873 /* We're taking over the BIOS, so clear any requests made by it since
5427 * the driver is in charge now. */ 5874 * the driver is in charge now. */
@@ -5525,6 +5972,8 @@ void intel_init_pm(struct drm_device *dev)
5525 dev_priv->display.update_wm = NULL; 5972 dev_priv->display.update_wm = NULL;
5526 } 5973 }
5527 dev_priv->display.init_clock_gating = haswell_init_clock_gating; 5974 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
5975 } else if (INTEL_INFO(dev)->gen == 8) {
5976 dev_priv->display.init_clock_gating = gen8_init_clock_gating;
5528 } else 5977 } else
5529 dev_priv->display.update_wm = NULL; 5978 dev_priv->display.update_wm = NULL;
5530 } else if (IS_VALLEYVIEW(dev)) { 5979 } else if (IS_VALLEYVIEW(dev)) {
@@ -5686,7 +6135,4 @@ void intel_pm_init(struct drm_device *dev)
5686 6135
5687 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 6136 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5688 intel_gen6_powersave_work); 6137 intel_gen6_powersave_work);
5689
5690 INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
5691} 6138}
5692
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 460ee1026fca..b620337e6d67 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -41,6 +41,16 @@ static inline int ring_space(struct intel_ring_buffer *ring)
41 return space; 41 return space;
42} 42}
43 43
44void __intel_ring_advance(struct intel_ring_buffer *ring)
45{
46 struct drm_i915_private *dev_priv = ring->dev->dev_private;
47
48 ring->tail &= ring->size - 1;
49 if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
50 return;
51 ring->write_tail(ring, ring->tail);
52}
53
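__intel_ring_advance() keeps the software tail inside the power-of-two ring with a mask and skips the MMIO tail write while error capture has stopped the rings, so queued commands stay unsubmitted. A stand-alone model of the wrap (hypothetical names):

    struct ring_model {
        unsigned int tail;
        unsigned int size;      /* power of two */
        int stopped;
    };

    static void ring_advance(struct ring_model *r)
    {
        r->tail &= r->size - 1;     /* wrap, e.g. 4100 & (4096 - 1) == 4 */
        if (r->stopped)
            return;                 /* hardware keeps the old tail */
        /* the real code calls ring->write_tail(ring, ring->tail) here */
    }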
44static int 54static int
45gen2_render_ring_flush(struct intel_ring_buffer *ring, 55gen2_render_ring_flush(struct intel_ring_buffer *ring,
46 u32 invalidate_domains, 56 u32 invalidate_domains,
@@ -350,6 +360,47 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
350 return 0; 360 return 0;
351} 361}
352 362
363static int
364gen8_render_ring_flush(struct intel_ring_buffer *ring,
365 u32 invalidate_domains, u32 flush_domains)
366{
367 u32 flags = 0;
368 u32 scratch_addr = ring->scratch.gtt_offset + 128;
369 int ret;
370
371 flags |= PIPE_CONTROL_CS_STALL;
372
373 if (flush_domains) {
374 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
375 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
376 }
377 if (invalidate_domains) {
378 flags |= PIPE_CONTROL_TLB_INVALIDATE;
379 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
380 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
381 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
382 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
383 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
384 flags |= PIPE_CONTROL_QW_WRITE;
385 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
386 }
387
388 ret = intel_ring_begin(ring, 6);
389 if (ret)
390 return ret;
391
392 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
393 intel_ring_emit(ring, flags);
394 intel_ring_emit(ring, scratch_addr);
395 intel_ring_emit(ring, 0);
396 intel_ring_emit(ring, 0);
397 intel_ring_emit(ring, 0);
398 intel_ring_advance(ring);
399
400 return 0;
401
402}
403
353static void ring_write_tail(struct intel_ring_buffer *ring, 404static void ring_write_tail(struct intel_ring_buffer *ring,
354 u32 value) 405 u32 value)
355{ 406{
@@ -385,8 +436,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
385 int ret = 0; 436 int ret = 0;
386 u32 head; 437 u32 head;
387 438
388 if (HAS_FORCE_WAKE(dev)) 439 gen6_gt_force_wake_get(dev_priv);
389 gen6_gt_force_wake_get(dev_priv);
390 440
391 if (I915_NEED_GFX_HWS(dev)) 441 if (I915_NEED_GFX_HWS(dev))
392 intel_ring_setup_status_page(ring); 442 intel_ring_setup_status_page(ring);
@@ -459,8 +509,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
459 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); 509 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
460 510
461out: 511out:
462 if (HAS_FORCE_WAKE(dev)) 512 gen6_gt_force_wake_put(dev_priv);
463 gen6_gt_force_wake_put(dev_priv);
464 513
465 return ret; 514 return ret;
466} 515}
@@ -559,8 +608,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
559 if (INTEL_INFO(dev)->gen >= 6) 608 if (INTEL_INFO(dev)->gen >= 6)
560 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 609 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
561 610
562 if (HAS_L3_GPU_CACHE(dev)) 611 if (HAS_L3_DPF(dev))
563 I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 612 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
564 613
565 return ret; 614 return ret;
566} 615}
@@ -593,7 +642,7 @@ update_mboxes(struct intel_ring_buffer *ring,
593#define MBOX_UPDATE_DWORDS 4 642#define MBOX_UPDATE_DWORDS 4
594 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 643 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
595 intel_ring_emit(ring, mmio_offset); 644 intel_ring_emit(ring, mmio_offset);
596 intel_ring_emit(ring, ring->outstanding_lazy_request); 645 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
597 intel_ring_emit(ring, MI_NOOP); 646 intel_ring_emit(ring, MI_NOOP);
598} 647}
599 648
@@ -629,9 +678,9 @@ gen6_add_request(struct intel_ring_buffer *ring)
629 678
630 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 679 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
631 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 680 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
632 intel_ring_emit(ring, ring->outstanding_lazy_request); 681 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
633 intel_ring_emit(ring, MI_USER_INTERRUPT); 682 intel_ring_emit(ring, MI_USER_INTERRUPT);
634 intel_ring_advance(ring); 683 __intel_ring_advance(ring);
635 684
636 return 0; 685 return 0;
637} 686}
@@ -723,7 +772,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
723 PIPE_CONTROL_WRITE_FLUSH | 772 PIPE_CONTROL_WRITE_FLUSH |
724 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 773 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
725 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 774 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
726 intel_ring_emit(ring, ring->outstanding_lazy_request); 775 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
727 intel_ring_emit(ring, 0); 776 intel_ring_emit(ring, 0);
728 PIPE_CONTROL_FLUSH(ring, scratch_addr); 777 PIPE_CONTROL_FLUSH(ring, scratch_addr);
729 scratch_addr += 128; /* write to separate cachelines */ 778 scratch_addr += 128; /* write to separate cachelines */
@@ -742,9 +791,9 @@ pc_render_add_request(struct intel_ring_buffer *ring)
742 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 791 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
743 PIPE_CONTROL_NOTIFY); 792 PIPE_CONTROL_NOTIFY);
744 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 793 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
745 intel_ring_emit(ring, ring->outstanding_lazy_request); 794 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
746 intel_ring_emit(ring, 0); 795 intel_ring_emit(ring, 0);
747 intel_ring_advance(ring); 796 __intel_ring_advance(ring);
748 797
749 return 0; 798 return 0;
750} 799}
@@ -963,9 +1012,9 @@ i9xx_add_request(struct intel_ring_buffer *ring)
963 1012
964 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 1013 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
965 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1014 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
966 intel_ring_emit(ring, ring->outstanding_lazy_request); 1015 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
967 intel_ring_emit(ring, MI_USER_INTERRUPT); 1016 intel_ring_emit(ring, MI_USER_INTERRUPT);
968 intel_ring_advance(ring); 1017 __intel_ring_advance(ring);
969 1018
970 return 0; 1019 return 0;
971} 1020}
@@ -987,10 +1036,10 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
987 1036
988 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1037 spin_lock_irqsave(&dev_priv->irq_lock, flags);
989 if (ring->irq_refcount++ == 0) { 1038 if (ring->irq_refcount++ == 0) {
990 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) 1039 if (HAS_L3_DPF(dev) && ring->id == RCS)
991 I915_WRITE_IMR(ring, 1040 I915_WRITE_IMR(ring,
992 ~(ring->irq_enable_mask | 1041 ~(ring->irq_enable_mask |
993 GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); 1042 GT_PARITY_ERROR(dev)));
994 else 1043 else
995 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1044 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
996 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); 1045 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1009,9 +1058,8 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
1009 1058
1010 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1059 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1011 if (--ring->irq_refcount == 0) { 1060 if (--ring->irq_refcount == 0) {
1012 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) 1061 if (HAS_L3_DPF(dev) && ring->id == RCS)
1013 I915_WRITE_IMR(ring, 1062 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1014 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1015 else 1063 else
1016 I915_WRITE_IMR(ring, ~0); 1064 I915_WRITE_IMR(ring, ~0);
1017 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); 1065 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1059,6 +1107,52 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
1059 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1107 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1060} 1108}
1061 1109
1110static bool
1111gen8_ring_get_irq(struct intel_ring_buffer *ring)
1112{
1113 struct drm_device *dev = ring->dev;
1114 struct drm_i915_private *dev_priv = dev->dev_private;
1115 unsigned long flags;
1116
1117 if (!dev->irq_enabled)
1118 return false;
1119
1120 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1121 if (ring->irq_refcount++ == 0) {
1122 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1123 I915_WRITE_IMR(ring,
1124 ~(ring->irq_enable_mask |
1125 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1126 } else {
1127 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1128 }
1129 POSTING_READ(RING_IMR(ring->mmio_base));
1130 }
1131 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1132
1133 return true;
1134}
1135
1136static void
1137gen8_ring_put_irq(struct intel_ring_buffer *ring)
1138{
1139 struct drm_device *dev = ring->dev;
1140 struct drm_i915_private *dev_priv = dev->dev_private;
1141 unsigned long flags;
1142
1143 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1144 if (--ring->irq_refcount == 0) {
1145 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1146 I915_WRITE_IMR(ring,
1147 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1148 } else {
1149 I915_WRITE_IMR(ring, ~0);
1150 }
1151 POSTING_READ(RING_IMR(ring->mmio_base));
1152 }
1153 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1154}
1155
1062static int 1156static int
1063i965_dispatch_execbuffer(struct intel_ring_buffer *ring, 1157i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
1064 u32 offset, u32 length, 1158 u32 offset, u32 length,
@@ -1317,7 +1411,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1317 /* Disable the ring buffer. The ring must be idle at this point */ 1411 /* Disable the ring buffer. The ring must be idle at this point */
1318 dev_priv = ring->dev->dev_private; 1412 dev_priv = ring->dev->dev_private;
1319 ret = intel_ring_idle(ring); 1413 ret = intel_ring_idle(ring);
1320 if (ret) 1414 if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
1321 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 1415 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1322 ring->name, ret); 1416 ring->name, ret);
1323 1417
@@ -1328,6 +1422,8 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1328 i915_gem_object_unpin(ring->obj); 1422 i915_gem_object_unpin(ring->obj);
1329 drm_gem_object_unreference(&ring->obj->base); 1423 drm_gem_object_unreference(&ring->obj->base);
1330 ring->obj = NULL; 1424 ring->obj = NULL;
1425 ring->preallocated_lazy_request = NULL;
1426 ring->outstanding_lazy_seqno = 0;
1331 1427
1332 if (ring->cleanup) 1428 if (ring->cleanup)
1333 ring->cleanup(ring); 1429 ring->cleanup(ring);
@@ -1414,6 +1510,9 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1414 if (ret != -ENOSPC) 1510 if (ret != -ENOSPC)
1415 return ret; 1511 return ret;
1416 1512
1513 /* force the tail write in case we have been skipping them */
1514 __intel_ring_advance(ring);
1515
1417 trace_i915_ring_wait_begin(ring); 1516 trace_i915_ring_wait_begin(ring);
1418 /* With GEM the hangcheck timer should kick us out of the loop, 1517 /* With GEM the hangcheck timer should kick us out of the loop,
1419 * leaving it early runs the risk of corrupting GEM state (due 1518 * leaving it early runs the risk of corrupting GEM state (due
@@ -1475,7 +1574,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
1475 int ret; 1574 int ret;
1476 1575
1477 /* We need to add any requests required to flush the objects and ring */ 1576 /* We need to add any requests required to flush the objects and ring */
1478 if (ring->outstanding_lazy_request) { 1577 if (ring->outstanding_lazy_seqno) {
1479 ret = i915_add_request(ring, NULL); 1578 ret = i915_add_request(ring, NULL);
1480 if (ret) 1579 if (ret)
1481 return ret; 1580 return ret;
@@ -1495,10 +1594,20 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
1495static int 1594static int
1496intel_ring_alloc_seqno(struct intel_ring_buffer *ring) 1595intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1497{ 1596{
1498 if (ring->outstanding_lazy_request) 1597 if (ring->outstanding_lazy_seqno)
1499 return 0; 1598 return 0;
1500 1599
1501 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request); 1600 if (ring->preallocated_lazy_request == NULL) {
1601 struct drm_i915_gem_request *request;
1602
1603 request = kmalloc(sizeof(*request), GFP_KERNEL);
1604 if (request == NULL)
1605 return -ENOMEM;
1606
1607 ring->preallocated_lazy_request = request;
1608 }
1609
1610 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
1502} 1611}
1503 1612
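Splitting outstanding_lazy_request into a seqno plus a preallocated request structure moves the kmalloc() up front, before any commands are emitted; once emission has started, retiring the request can no longer fail on allocation. The shape of that pattern, stand-alone (hypothetical types):

    #include <stdlib.h>

    struct request { int seqno; };

    struct ring_state {
        struct request *prealloc;   /* reserved before emitting */
        int lazy_seqno;             /* 0 means no request open  */
    };

    static int alloc_seqno(struct ring_state *r, int next_seqno)
    {
        if (r->lazy_seqno)
            return 0;               /* already reserved */

        if (!r->prealloc) {
            r->prealloc = malloc(sizeof(*r->prealloc));
            if (!r->prealloc)
                return -1;          /* fail early, nothing emitted yet */
        }

        r->lazy_seqno = next_seqno;
        return 0;
    }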
1504static int __intel_ring_begin(struct intel_ring_buffer *ring, 1613static int __intel_ring_begin(struct intel_ring_buffer *ring,
@@ -1545,7 +1654,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1545{ 1654{
1546 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1655 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1547 1656
1548 BUG_ON(ring->outstanding_lazy_request); 1657 BUG_ON(ring->outstanding_lazy_seqno);
1549 1658
1550 if (INTEL_INFO(ring->dev)->gen >= 6) { 1659 if (INTEL_INFO(ring->dev)->gen >= 6) {
1551 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); 1660 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@ -1558,17 +1667,6 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1558 ring->hangcheck.seqno = seqno; 1667 ring->hangcheck.seqno = seqno;
1559} 1668}
1560 1669
1561void intel_ring_advance(struct intel_ring_buffer *ring)
1562{
1563 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1564
1565 ring->tail &= ring->size - 1;
1566 if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
1567 return;
1568 ring->write_tail(ring, ring->tail);
1569}
1570
1571
1572static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, 1670static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1573 u32 value) 1671 u32 value)
1574{ 1672{
@@ -1613,6 +1711,8 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
1613 return ret; 1711 return ret;
1614 1712
1615 cmd = MI_FLUSH_DW; 1713 cmd = MI_FLUSH_DW;
1714 if (INTEL_INFO(ring->dev)->gen >= 8)
1715 cmd += 1;
1616 /* 1716 /*
1617 * Bspec vol 1c.5 - video engine command streamer: 1717 * Bspec vol 1c.5 - video engine command streamer:
1618 * "If ENABLED, all TLBs will be invalidated once the flush 1718 * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1624,9 +1724,38 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
1624 MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 1724 MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1625 intel_ring_emit(ring, cmd); 1725 intel_ring_emit(ring, cmd);
1626 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 1726 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1727 if (INTEL_INFO(ring->dev)->gen >= 8) {
1728 intel_ring_emit(ring, 0); /* upper addr */
1729 intel_ring_emit(ring, 0); /* value */
1730 } else {
1731 intel_ring_emit(ring, 0);
1732 intel_ring_emit(ring, MI_NOOP);
1733 }
1734 intel_ring_advance(ring);
1735 return 0;
1736}
1737
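Both flush paths bump the command word by one on gen8 because the MI_FLUSH_DW length field counts dwords beyond the first two, and the post-sync address grows to 64 bits. A stand-alone model of the emission layout (illustrative helper, not the real opcodes):

    #include <stdint.h>

    static int emit_flush_dw(uint32_t *cs, int gen, uint32_t cmd,
                             uint32_t gtt_addr)
    {
        int n = 0;

        if (gen >= 8)
            cmd += 1;          /* one extra dword: upper 32 address bits */
        cs[n++] = cmd;
        cs[n++] = gtt_addr;
        if (gen >= 8) {
            cs[n++] = 0;       /* upper address  */
            cs[n++] = 0;       /* post-sync value */
        } else {
            cs[n++] = 0;       /* post-sync value */
            cs[n++] = 0;       /* MI_NOOP padding keeps qword alignment */
        }
        return n;              /* dwords written, always a multiple of 2 */
    }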
1738static int
1739gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1740 u32 offset, u32 len,
1741 unsigned flags)
1742{
1743 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1744 bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
1745 !(flags & I915_DISPATCH_SECURE);
1746 int ret;
1747
1748 ret = intel_ring_begin(ring, 4);
1749 if (ret)
1750 return ret;
1751
1752 /* FIXME(BDW): Address space and security selectors. */
1753 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8));
1754 intel_ring_emit(ring, offset);
1627 intel_ring_emit(ring, 0); 1755 intel_ring_emit(ring, 0);
1628 intel_ring_emit(ring, MI_NOOP); 1756 intel_ring_emit(ring, MI_NOOP);
1629 intel_ring_advance(ring); 1757 intel_ring_advance(ring);
1758
1630 return 0; 1759 return 0;
1631} 1760}
1632 1761
@@ -1686,6 +1815,8 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
1686 return ret; 1815 return ret;
1687 1816
1688 cmd = MI_FLUSH_DW; 1817 cmd = MI_FLUSH_DW;
1818 if (INTEL_INFO(ring->dev)->gen >= 8)
1819 cmd += 1;
1689 /* 1820 /*
1690 * Bspec vol 1c.3 - blitter engine command streamer: 1821 * Bspec vol 1c.3 - blitter engine command streamer:
1691 * "If ENABLED, all TLBs will be invalidated once the flush 1822 * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1697,8 +1828,13 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
1697 MI_FLUSH_DW_OP_STOREDW; 1828 MI_FLUSH_DW_OP_STOREDW;
1698 intel_ring_emit(ring, cmd); 1829 intel_ring_emit(ring, cmd);
1699 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 1830 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1700 intel_ring_emit(ring, 0); 1831 if (INTEL_INFO(ring->dev)->gen >= 8) {
1701 intel_ring_emit(ring, MI_NOOP); 1832 intel_ring_emit(ring, 0); /* upper addr */
1833 intel_ring_emit(ring, 0); /* value */
1834 } else {
1835 intel_ring_emit(ring, 0);
1836 intel_ring_emit(ring, MI_NOOP);
1837 }
1702 intel_ring_advance(ring); 1838 intel_ring_advance(ring);
1703 1839
1704 if (IS_GEN7(dev) && flush) 1840 if (IS_GEN7(dev) && flush)
@@ -1721,8 +1857,14 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1721 ring->flush = gen7_render_ring_flush; 1857 ring->flush = gen7_render_ring_flush;
1722 if (INTEL_INFO(dev)->gen == 6) 1858 if (INTEL_INFO(dev)->gen == 6)
1723 ring->flush = gen6_render_ring_flush; 1859 ring->flush = gen6_render_ring_flush;
1724 ring->irq_get = gen6_ring_get_irq; 1860 if (INTEL_INFO(dev)->gen >= 8) {
1725 ring->irq_put = gen6_ring_put_irq; 1861 ring->flush = gen8_render_ring_flush;
1862 ring->irq_get = gen8_ring_get_irq;
1863 ring->irq_put = gen8_ring_put_irq;
1864 } else {
1865 ring->irq_get = gen6_ring_get_irq;
1866 ring->irq_put = gen6_ring_put_irq;
1867 }
1726 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 1868 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1727 ring->get_seqno = gen6_ring_get_seqno; 1869 ring->get_seqno = gen6_ring_get_seqno;
1728 ring->set_seqno = ring_set_seqno; 1870 ring->set_seqno = ring_set_seqno;
@@ -1764,6 +1906,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1764 ring->write_tail = ring_write_tail; 1906 ring->write_tail = ring_write_tail;
1765 if (IS_HASWELL(dev)) 1907 if (IS_HASWELL(dev))
1766 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; 1908 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
1909 else if (IS_GEN8(dev))
1910 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
1767 else if (INTEL_INFO(dev)->gen >= 6) 1911 else if (INTEL_INFO(dev)->gen >= 6)
1768 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 1912 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1769 else if (INTEL_INFO(dev)->gen >= 4) 1913 else if (INTEL_INFO(dev)->gen >= 4)
@@ -1877,7 +2021,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1877 ring->id = VCS; 2021 ring->id = VCS;
1878 2022
1879 ring->write_tail = ring_write_tail; 2023 ring->write_tail = ring_write_tail;
1880 if (IS_GEN6(dev) || IS_GEN7(dev)) { 2024 if (INTEL_INFO(dev)->gen >= 6) {
1881 ring->mmio_base = GEN6_BSD_RING_BASE; 2025 ring->mmio_base = GEN6_BSD_RING_BASE;
1882 /* gen6 bsd needs a special wa for tail updates */ 2026 /* gen6 bsd needs a special wa for tail updates */
1883 if (IS_GEN6(dev)) 2027 if (IS_GEN6(dev))
@@ -1886,10 +2030,20 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1886 ring->add_request = gen6_add_request; 2030 ring->add_request = gen6_add_request;
1887 ring->get_seqno = gen6_ring_get_seqno; 2031 ring->get_seqno = gen6_ring_get_seqno;
1888 ring->set_seqno = ring_set_seqno; 2032 ring->set_seqno = ring_set_seqno;
1889 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; 2033 if (INTEL_INFO(dev)->gen >= 8) {
1890 ring->irq_get = gen6_ring_get_irq; 2034 ring->irq_enable_mask =
1891 ring->irq_put = gen6_ring_put_irq; 2035 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
1892 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2036 ring->irq_get = gen8_ring_get_irq;
2037 ring->irq_put = gen8_ring_put_irq;
2038 ring->dispatch_execbuffer =
2039 gen8_ring_dispatch_execbuffer;
2040 } else {
2041 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2042 ring->irq_get = gen6_ring_get_irq;
2043 ring->irq_put = gen6_ring_put_irq;
2044 ring->dispatch_execbuffer =
2045 gen6_ring_dispatch_execbuffer;
2046 }
1893 ring->sync_to = gen6_ring_sync; 2047 ring->sync_to = gen6_ring_sync;
1894 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR; 2048 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
1895 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID; 2049 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
@@ -1935,10 +2089,18 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
1935 ring->add_request = gen6_add_request; 2089 ring->add_request = gen6_add_request;
1936 ring->get_seqno = gen6_ring_get_seqno; 2090 ring->get_seqno = gen6_ring_get_seqno;
1937 ring->set_seqno = ring_set_seqno; 2091 ring->set_seqno = ring_set_seqno;
1938 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; 2092 if (INTEL_INFO(dev)->gen >= 8) {
1939 ring->irq_get = gen6_ring_get_irq; 2093 ring->irq_enable_mask =
1940 ring->irq_put = gen6_ring_put_irq; 2094 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
1941 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2095 ring->irq_get = gen8_ring_get_irq;
2096 ring->irq_put = gen8_ring_put_irq;
2097 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2098 } else {
2099 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2100 ring->irq_get = gen6_ring_get_irq;
2101 ring->irq_put = gen6_ring_put_irq;
2102 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2103 }
1942 ring->sync_to = gen6_ring_sync; 2104 ring->sync_to = gen6_ring_sync;
1943 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR; 2105 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
1944 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV; 2106 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
@@ -1967,10 +2129,19 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
1967 ring->add_request = gen6_add_request; 2129 ring->add_request = gen6_add_request;
1968 ring->get_seqno = gen6_ring_get_seqno; 2130 ring->get_seqno = gen6_ring_get_seqno;
1969 ring->set_seqno = ring_set_seqno; 2131 ring->set_seqno = ring_set_seqno;
1970 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 2132
1971 ring->irq_get = hsw_vebox_get_irq; 2133 if (INTEL_INFO(dev)->gen >= 8) {
1972 ring->irq_put = hsw_vebox_put_irq; 2134 ring->irq_enable_mask =
1973 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2135 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
2136 ring->irq_get = gen8_ring_get_irq;
2137 ring->irq_put = gen8_ring_put_irq;
2138 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2139 } else {
2140 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2141 ring->irq_get = hsw_vebox_get_irq;
2142 ring->irq_put = hsw_vebox_put_irq;
2143 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2144 }
1974 ring->sync_to = gen6_ring_sync; 2145 ring->sync_to = gen6_ring_sync;
1975 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER; 2146 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
1976 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV; 2147 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 68b1ca974d59..71a73f4fe252 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -34,6 +34,7 @@ struct intel_hw_status_page {
34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) 34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
35 35
36enum intel_ring_hangcheck_action { 36enum intel_ring_hangcheck_action {
37 HANGCHECK_IDLE = 0,
37 HANGCHECK_WAIT, 38 HANGCHECK_WAIT,
38 HANGCHECK_ACTIVE, 39 HANGCHECK_ACTIVE,
39 HANGCHECK_KICK, 40 HANGCHECK_KICK,
@@ -140,7 +141,8 @@ struct intel_ring_buffer {
140 /** 141 /**
141 * Do we have some not yet emitted requests outstanding? 142 * Do we have some not yet emitted requests outstanding?
142 */ 143 */
143 u32 outstanding_lazy_request; 144 struct drm_i915_gem_request *preallocated_lazy_request;
145 u32 outstanding_lazy_seqno;
144 bool gpu_caches_dirty; 146 bool gpu_caches_dirty;
145 bool fbc_dirty; 147 bool fbc_dirty;
146 148
@@ -237,7 +239,12 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
237 iowrite32(data, ring->virtual_start + ring->tail); 239 iowrite32(data, ring->virtual_start + ring->tail);
238 ring->tail += 4; 240 ring->tail += 4;
239} 241}
240void intel_ring_advance(struct intel_ring_buffer *ring); 242static inline void intel_ring_advance(struct intel_ring_buffer *ring)
243{
244 ring->tail &= ring->size - 1;
245}
246void __intel_ring_advance(struct intel_ring_buffer *ring);
247
241int __must_check intel_ring_idle(struct intel_ring_buffer *ring); 248int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
242void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno); 249void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
243int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); 250int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
@@ -258,8 +265,8 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
258 265
259static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring) 266static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
260{ 267{
261 BUG_ON(ring->outstanding_lazy_request == 0); 268 BUG_ON(ring->outstanding_lazy_seqno == 0);
262 return ring->outstanding_lazy_request; 269 return ring->outstanding_lazy_seqno;
263} 270}
264 271
265static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) 272static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
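Annotation: the header change above turns intel_ring_advance() into a static inline that simply masks the tail, ring->tail &= ring->size - 1. This is a cheap modulo that is only correct because ring sizes are powers of two, so size - 1 is an all-ones mask of the low bits. A standalone illustration in plain C (nothing here comes from the driver):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t size = 4096;   /* power of two, like the ring size */
            uint32_t tail = 4092;

            tail += 4;              /* advance past the last dword... */
            tail &= size - 1;       /* ...and wrap back to offset 0 */
            assert(tail == 0);
            return 0;
    }

The heavier work moves to the out-of-line __intel_ring_advance(), which also kicks the new tail to the hardware.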
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 49482fd5b76c..a583e8f718a7 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -539,7 +539,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
539 goto log_fail; 539 goto log_fail;
540 540
541 while ((status == SDVO_CMD_STATUS_PENDING || 541 while ((status == SDVO_CMD_STATUS_PENDING ||
542 status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) { 542 status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
543 if (retry < 10) 543 if (retry < 10)
544 msleep(15); 544 msleep(15);
545 else 545 else
@@ -1068,7 +1068,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
1068 1068
1069static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config) 1069static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
1070{ 1070{
1071 unsigned dotclock = pipe_config->adjusted_mode.clock; 1071 unsigned dotclock = pipe_config->port_clock;
1072 struct dpll *clock = &pipe_config->dpll; 1072 struct dpll *clock = &pipe_config->dpll;
1073 1073
1074 /* SDVO TV has fixed PLL values that depend on its clock range, 1074 /* SDVO TV has fixed PLL values that depend on its clock range,
@@ -1133,7 +1133,6 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1133 */ 1133 */
1134 pipe_config->pixel_multiplier = 1134 pipe_config->pixel_multiplier =
1135 intel_sdvo_get_pixel_multiplier(adjusted_mode); 1135 intel_sdvo_get_pixel_multiplier(adjusted_mode);
1136 adjusted_mode->clock *= pipe_config->pixel_multiplier;
1137 1136
1138 if (intel_sdvo->color_range_auto) { 1137 if (intel_sdvo->color_range_auto) {
1139 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 1138 /* See CEA-861-E - 5.1 Default Encoding Parameters */
@@ -1217,11 +1216,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
1217 !intel_sdvo_set_tv_format(intel_sdvo)) 1216 !intel_sdvo_set_tv_format(intel_sdvo))
1218 return; 1217 return;
1219 1218
1220 /* We have tried to get input timing in mode_fixup, and filled into
1221 * adjusted_mode.
1222 */
1223 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1219 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1224 input_dtd.part1.clock /= crtc->config.pixel_multiplier;
1225 1220
1226 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) 1221 if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
1227 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; 1222 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
@@ -1330,6 +1325,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1330 struct intel_sdvo *intel_sdvo = to_sdvo(encoder); 1325 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1331 struct intel_sdvo_dtd dtd; 1326 struct intel_sdvo_dtd dtd;
1332 int encoder_pixel_multiplier = 0; 1327 int encoder_pixel_multiplier = 0;
1328 int dotclock;
1333 u32 flags = 0, sdvox; 1329 u32 flags = 0, sdvox;
1334 u8 val; 1330 u8 val;
1335 bool ret; 1331 bool ret;
@@ -1368,6 +1364,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1368 >> SDVO_PORT_MULTIPLY_SHIFT) + 1; 1364 >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
1369 } 1365 }
1370 1366
1367 dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
1368
1369 if (HAS_PCH_SPLIT(dev))
1370 ironlake_check_encoder_dotclock(pipe_config, dotclock);
1371
1372 pipe_config->adjusted_mode.crtc_clock = dotclock;
1373
1371 /* Cross check the port pixel multiplier with the sdvo encoder state. */ 1374 /* Cross check the port pixel multiplier with the sdvo encoder state. */
1372 if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, 1375 if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
1373 &val, 1)) { 1376 &val, 1)) {
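Annotation: the readout hunk above recovers the pipe dotclock from hardware state. SDVO runs the port at pixel_multiplier times the pixel clock (to keep the link frequency inside the encoder's supported range), so readout divides the observed port clock back down before the PCH-split dotclock sanity check. As a worked example, in a hypothetical helper:

    /* e.g. a 25175 kHz TV mode with a 4x multiplier runs the
     * port at 100700 kHz; readout recovers 100700 / 4 = 25175 */
    static int sdvo_dotclock_khz(int port_clock_khz, int pixel_multiplier)
    {
            return port_clock_khz / pixel_multiplier;
    }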
@@ -1770,6 +1773,9 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1770{ 1773{
1771 struct edid *edid; 1774 struct edid *edid;
1772 1775
1776 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1777 connector->base.id, drm_get_connector_name(connector));
1778
1773 /* set the bus switch and get the modes */ 1779 /* set the bus switch and get the modes */
1774 edid = intel_sdvo_get_edid(connector); 1780 edid = intel_sdvo_get_edid(connector);
1775 1781
@@ -1865,6 +1871,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1865 uint32_t reply = 0, format_map = 0; 1871 uint32_t reply = 0, format_map = 0;
1866 int i; 1872 int i;
1867 1873
1874 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1875 connector->base.id, drm_get_connector_name(connector));
1876
1868 /* Read the list of supported input resolutions for the selected TV 1877 /* Read the list of supported input resolutions for the selected TV
1869 * format. 1878 * format.
1870 */ 1879 */
@@ -1899,6 +1908,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1899 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1908 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1900 struct drm_display_mode *newmode; 1909 struct drm_display_mode *newmode;
1901 1910
1911 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1912 connector->base.id, drm_get_connector_name(connector));
1913
1902 /* 1914 /*
1903 * Fetch modes from VBT. For SDVO prefer the VBT mode since some 1915 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
1904 * SDVO->LVDS transcoders can't cope with the EDID mode. 1916 * SDVO->LVDS transcoders can't cope with the EDID mode.
@@ -1930,7 +1942,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1930 break; 1942 break;
1931 } 1943 }
1932 } 1944 }
1933
1934} 1945}
1935 1946
1936static int intel_sdvo_get_modes(struct drm_connector *connector) 1947static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -1998,7 +2009,6 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1998 intel_sdvo_connector->tv_format); 2009 intel_sdvo_connector->tv_format);
1999 2010
2000 intel_sdvo_destroy_enhance_property(connector); 2011 intel_sdvo_destroy_enhance_property(connector);
2001 drm_sysfs_connector_remove(connector);
2002 drm_connector_cleanup(connector); 2012 drm_connector_cleanup(connector);
2003 kfree(intel_sdvo_connector); 2013 kfree(intel_sdvo_connector);
2004} 2014}
@@ -2394,7 +2404,9 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2394 struct intel_connector *intel_connector; 2404 struct intel_connector *intel_connector;
2395 struct intel_sdvo_connector *intel_sdvo_connector; 2405 struct intel_sdvo_connector *intel_sdvo_connector;
2396 2406
2397 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2407 DRM_DEBUG_KMS("initialising DVI device %d\n", device);
2408
2409 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2398 if (!intel_sdvo_connector) 2410 if (!intel_sdvo_connector)
2399 return false; 2411 return false;
2400 2412
@@ -2442,7 +2454,9 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2442 struct intel_connector *intel_connector; 2454 struct intel_connector *intel_connector;
2443 struct intel_sdvo_connector *intel_sdvo_connector; 2455 struct intel_sdvo_connector *intel_sdvo_connector;
2444 2456
2445 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2457 DRM_DEBUG_KMS("initialising TV type %d\n", type);
2458
2459 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2446 if (!intel_sdvo_connector) 2460 if (!intel_sdvo_connector)
2447 return false; 2461 return false;
2448 2462
@@ -2467,6 +2481,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2467 return true; 2481 return true;
2468 2482
2469err: 2483err:
2484 drm_sysfs_connector_remove(connector);
2470 intel_sdvo_destroy(connector); 2485 intel_sdvo_destroy(connector);
2471 return false; 2486 return false;
2472} 2487}
@@ -2479,7 +2494,9 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
2479 struct intel_connector *intel_connector; 2494 struct intel_connector *intel_connector;
2480 struct intel_sdvo_connector *intel_sdvo_connector; 2495 struct intel_sdvo_connector *intel_sdvo_connector;
2481 2496
2482 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2497 DRM_DEBUG_KMS("initialising analog device %d\n", device);
2498
2499 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2483 if (!intel_sdvo_connector) 2500 if (!intel_sdvo_connector)
2484 return false; 2501 return false;
2485 2502
@@ -2510,7 +2527,9 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2510 struct intel_connector *intel_connector; 2527 struct intel_connector *intel_connector;
2511 struct intel_sdvo_connector *intel_sdvo_connector; 2528 struct intel_sdvo_connector *intel_sdvo_connector;
2512 2529
2513 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2530 DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
2531
2532 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2514 if (!intel_sdvo_connector) 2533 if (!intel_sdvo_connector)
2515 return false; 2534 return false;
2516 2535
@@ -2534,6 +2553,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2534 return true; 2553 return true;
2535 2554
2536err: 2555err:
2556 drm_sysfs_connector_remove(connector);
2537 intel_sdvo_destroy(connector); 2557 intel_sdvo_destroy(connector);
2538 return false; 2558 return false;
2539} 2559}
@@ -2605,8 +2625,10 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
2605 2625
2606 list_for_each_entry_safe(connector, tmp, 2626 list_for_each_entry_safe(connector, tmp,
2607 &dev->mode_config.connector_list, head) { 2627 &dev->mode_config.connector_list, head) {
2608 if (intel_attached_encoder(connector) == &intel_sdvo->base) 2628 if (intel_attached_encoder(connector) == &intel_sdvo->base) {
2629 drm_sysfs_connector_remove(connector);
2609 intel_sdvo_destroy(connector); 2630 intel_sdvo_destroy(connector);
2631 }
2610 } 2632 }
2611} 2633}
2612 2634
@@ -2876,7 +2898,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2876 struct intel_encoder *intel_encoder; 2898 struct intel_encoder *intel_encoder;
2877 struct intel_sdvo *intel_sdvo; 2899 struct intel_sdvo *intel_sdvo;
2878 int i; 2900 int i;
2879 intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); 2901 intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
2880 if (!intel_sdvo) 2902 if (!intel_sdvo)
2881 return false; 2903 return false;
2882 2904
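Annotation: a recurring cleanup through this file is that kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL) becomes kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL). Taking the size from the pointee rather than naming the type keeps the allocation correct if the variable's type is ever changed, and it is the form checkpatch prefers. The same idiom in standalone C, with calloc standing in for kzalloc:

    #include <stdlib.h>

    struct foo { int a, b; };

    static struct foo *foo_alloc(void)
    {
            /* sizeof(*p) tracks p's pointee type automatically */
            struct foo *p = calloc(1, sizeof(*p));

            return p;
    }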
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 9a0e6c5ea540..9944d8135e87 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -25,7 +25,10 @@
25#include "i915_drv.h" 25#include "i915_drv.h"
26#include "intel_drv.h" 26#include "intel_drv.h"
27 27
28/* IOSF sideband */ 28/*
29 * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
30 * VLV_VLV2_PUNIT_HAS_0.8.docx
31 */
29static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, 32static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
30 u32 port, u32 opcode, u32 addr, u32 *val) 33 u32 port, u32 opcode, u32 addr, u32 *val)
31{ 34{
@@ -101,19 +104,83 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
101 return val; 104 return val;
102} 105}
103 106
104u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg) 107u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
105{ 108{
106 u32 val = 0; 109 u32 val = 0;
110 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
111 PUNIT_OPCODE_REG_READ, reg, &val);
112 return val;
113}
107 114
108 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO, 115void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
109 DPIO_OPCODE_REG_READ, reg, &val); 116{
117 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
118 PUNIT_OPCODE_REG_WRITE, reg, &val);
119}
120
121u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
122{
123 u32 val = 0;
124 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
125 PUNIT_OPCODE_REG_READ, reg, &val);
126 return val;
127}
128
129void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
130{
131 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
132 PUNIT_OPCODE_REG_WRITE, reg, &val);
133}
134
135u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
136{
137 u32 val = 0;
138 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
139 PUNIT_OPCODE_REG_READ, reg, &val);
140 return val;
141}
110 142
143void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
144{
145 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
146 PUNIT_OPCODE_REG_WRITE, reg, &val);
147}
148
149u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
150{
151 u32 val = 0;
152 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
153 PUNIT_OPCODE_REG_READ, reg, &val);
154 return val;
155}
156
157void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
158{
159 vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
160 PUNIT_OPCODE_REG_WRITE, reg, &val);
161}
162
163static u32 vlv_get_phy_port(enum pipe pipe)
164{
165 u32 port = IOSF_PORT_DPIO;
166
167 WARN_ON((pipe != PIPE_A) && (pipe != PIPE_B));
168
169 return port;
170}
171
172u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
173{
174 u32 val = 0;
175
176 vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
177 DPIO_OPCODE_REG_READ, reg, &val);
111 return val; 178 return val;
112} 179}
113 180
114void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val) 181void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
115{ 182{
116 vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO, 183 vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
117 DPIO_OPCODE_REG_WRITE, reg, &val); 184 DPIO_OPCODE_REG_WRITE, reg, &val);
118} 185}
119 186
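Annotation: all of the new accessors above are the same thin wrapper around vlv_sideband_rw() with a fixed devfn of PCI_DEVFN(2, 0), a fixed IOSF port, and the punit read or write opcode. The patch writes each pair out by hand; a hypothetical generator macro showing the shared shape (a sketch, not what the patch does):

    #define VLV_SIDEBAND_ACCESSORS(name, port)                          \
    u32 vlv_##name##_read(struct drm_i915_private *dev_priv, u32 reg)  \
    {                                                                   \
            u32 val = 0;                                                \
            vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), port,            \
                            PUNIT_OPCODE_REG_READ, reg, &val);          \
            return val;                                                 \
    }                                                                   \
    void vlv_##name##_write(struct drm_i915_private *dev_priv,          \
                            u32 reg, u32 val)                           \
    {                                                                   \
            vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), port,            \
                            PUNIT_OPCODE_REG_WRITE, reg, &val);         \
    }

    VLV_SIDEBAND_ACCESSORS(cck, IOSF_PORT_CCK)
    VLV_SIDEBAND_ACCESSORS(ccu, IOSF_PORT_CCU)

Note that vlv_dpio_read/write stay separate because their port now depends on the pipe via vlv_get_phy_port().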
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index ad6ec4b39005..b9fabf826f7d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -260,14 +260,14 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
260 if (obj->tiling_mode != I915_TILING_NONE) 260 if (obj->tiling_mode != I915_TILING_NONE)
261 sprctl |= SPRITE_TILED; 261 sprctl |= SPRITE_TILED;
262 262
263 if (IS_HASWELL(dev)) 263 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
264 sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE; 264 sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
265 else 265 else
266 sprctl |= SPRITE_TRICKLE_FEED_DISABLE; 266 sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
267 267
268 sprctl |= SPRITE_ENABLE; 268 sprctl |= SPRITE_ENABLE;
269 269
270 if (IS_HASWELL(dev)) 270 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
271 sprctl |= SPRITE_PIPE_CSC_ENABLE; 271 sprctl |= SPRITE_PIPE_CSC_ENABLE;
272 272
273 intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true, 273 intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
@@ -288,7 +288,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
288 dev_priv->sprite_scaling_enabled |= 1 << pipe; 288 dev_priv->sprite_scaling_enabled |= 1 << pipe;
289 289
290 if (!scaling_was_enabled) { 290 if (!scaling_was_enabled) {
291 intel_update_watermarks(dev); 291 intel_update_watermarks(crtc);
292 intel_wait_for_vblank(dev, pipe); 292 intel_wait_for_vblank(dev, pipe);
293 } 293 }
294 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; 294 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
@@ -306,7 +306,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
306 306
307 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET 307 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
308 * register */ 308 * register */
309 if (IS_HASWELL(dev)) 309 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
310 I915_WRITE(SPROFFSET(pipe), (y << 16) | x); 310 I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
311 else if (obj->tiling_mode != I915_TILING_NONE) 311 else if (obj->tiling_mode != I915_TILING_NONE)
312 I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); 312 I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
@@ -323,7 +323,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
323 323
324 /* potentially re-enable LP watermarks */ 324 /* potentially re-enable LP watermarks */
325 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) 325 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
326 intel_update_watermarks(dev); 326 intel_update_watermarks(crtc);
327} 327}
328 328
329static void 329static void
@@ -349,7 +349,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
349 349
350 /* potentially re-enable LP watermarks */ 350 /* potentially re-enable LP watermarks */
351 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) 351 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
352 intel_update_watermarks(dev); 352 intel_update_watermarks(crtc);
353} 353}
354 354
355static int 355static int
@@ -521,13 +521,28 @@ intel_enable_primary(struct drm_crtc *crtc)
521 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 521 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
522 int reg = DSPCNTR(intel_crtc->plane); 522 int reg = DSPCNTR(intel_crtc->plane);
523 523
524 if (!intel_crtc->primary_disabled) 524 if (intel_crtc->primary_enabled)
525 return; 525 return;
526 526
527 intel_crtc->primary_disabled = false; 527 intel_crtc->primary_enabled = true;
528 intel_update_fbc(dev);
529 528
530 I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE); 529 I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
530 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
531
532 /*
533 * FIXME IPS should be fine as long as one plane is
534 * enabled, but in practice it seems to have problems
535 * when going from primary only to sprite only and vice
536 * versa.
537 */
538 if (intel_crtc->config.ips_enabled) {
539 intel_wait_for_vblank(dev, intel_crtc->pipe);
540 hsw_enable_ips(intel_crtc);
541 }
542
543 mutex_lock(&dev->struct_mutex);
544 intel_update_fbc(dev);
545 mutex_unlock(&dev->struct_mutex);
531} 546}
532 547
533static void 548static void
@@ -538,13 +553,26 @@ intel_disable_primary(struct drm_crtc *crtc)
538 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 553 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
539 int reg = DSPCNTR(intel_crtc->plane); 554 int reg = DSPCNTR(intel_crtc->plane);
540 555
541 if (intel_crtc->primary_disabled) 556 if (!intel_crtc->primary_enabled)
542 return; 557 return;
543 558
544 I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE); 559 intel_crtc->primary_enabled = false;
545 560
546 intel_crtc->primary_disabled = true; 561 mutex_lock(&dev->struct_mutex);
547 intel_update_fbc(dev); 562 if (dev_priv->fbc.plane == intel_crtc->plane)
563 intel_disable_fbc(dev);
564 mutex_unlock(&dev->struct_mutex);
565
566 /*
567 * FIXME IPS should be fine as long as one plane is
568 * enabled, but in practice it seems to have problems
569 * when going from primary only to sprite only and vice
570 * versa.
571 */
572 hsw_disable_ips(intel_crtc);
573
574 I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
575 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
548} 576}
549 577
550static int 578static int
@@ -623,15 +651,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
623 uint32_t src_w, uint32_t src_h) 651 uint32_t src_w, uint32_t src_h)
624{ 652{
625 struct drm_device *dev = plane->dev; 653 struct drm_device *dev = plane->dev;
626 struct drm_i915_private *dev_priv = dev->dev_private;
627 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 654 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
628 struct intel_plane *intel_plane = to_intel_plane(plane); 655 struct intel_plane *intel_plane = to_intel_plane(plane);
629 struct intel_framebuffer *intel_fb; 656 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
630 struct drm_i915_gem_object *obj, *old_obj; 657 struct drm_i915_gem_object *obj = intel_fb->obj;
631 int pipe = intel_plane->pipe; 658 struct drm_i915_gem_object *old_obj = intel_plane->obj;
632 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 659 int ret;
633 pipe);
634 int ret = 0;
635 bool disable_primary = false; 660 bool disable_primary = false;
636 bool visible; 661 bool visible;
637 int hscale, vscale; 662 int hscale, vscale;
@@ -652,29 +677,23 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
652 .y2 = crtc_y + crtc_h, 677 .y2 = crtc_y + crtc_h,
653 }; 678 };
654 const struct drm_rect clip = { 679 const struct drm_rect clip = {
655 .x2 = crtc->mode.hdisplay, 680 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
656 .y2 = crtc->mode.vdisplay, 681 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
682 };
683 const struct {
684 int crtc_x, crtc_y;
685 unsigned int crtc_w, crtc_h;
686 uint32_t src_x, src_y, src_w, src_h;
687 } orig = {
688 .crtc_x = crtc_x,
689 .crtc_y = crtc_y,
690 .crtc_w = crtc_w,
691 .crtc_h = crtc_h,
692 .src_x = src_x,
693 .src_y = src_y,
694 .src_w = src_w,
695 .src_h = src_h,
657 }; 696 };
658
659 intel_fb = to_intel_framebuffer(fb);
660 obj = intel_fb->obj;
661
662 old_obj = intel_plane->obj;
663
664 intel_plane->crtc_x = crtc_x;
665 intel_plane->crtc_y = crtc_y;
666 intel_plane->crtc_w = crtc_w;
667 intel_plane->crtc_h = crtc_h;
668 intel_plane->src_x = src_x;
669 intel_plane->src_y = src_y;
670 intel_plane->src_w = src_w;
671 intel_plane->src_h = src_h;
672
673 /* Pipe must be running... */
674 if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) {
675 DRM_DEBUG_KMS("Pipe disabled\n");
676 return -EINVAL;
677 }
678 697
679 /* Don't modify another pipe's plane */ 698 /* Don't modify another pipe's plane */
680 if (intel_plane->pipe != intel_crtc->pipe) { 699 if (intel_plane->pipe != intel_crtc->pipe) {
@@ -810,7 +829,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
810 * we can disable the primary and save power. 829 * we can disable the primary and save power.
811 */ 830 */
812 disable_primary = drm_rect_equals(&dst, &clip); 831 disable_primary = drm_rect_equals(&dst, &clip);
813 WARN_ON(disable_primary && !visible); 832 WARN_ON(disable_primary && !visible && intel_crtc->active);
814 833
815 mutex_lock(&dev->struct_mutex); 834 mutex_lock(&dev->struct_mutex);
816 835
@@ -820,27 +839,40 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
820 * the sprite planes only require 128KiB alignment and 32 PTE padding. 839 * the sprite planes only require 128KiB alignment and 32 PTE padding.
821 */ 840 */
822 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 841 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
823 if (ret)
824 goto out_unlock;
825 842
826 intel_plane->obj = obj; 843 mutex_unlock(&dev->struct_mutex);
827
828 /*
829 * Be sure to re-enable the primary before the sprite is no longer
830 * covering it fully.
831 */
832 if (!disable_primary)
833 intel_enable_primary(crtc);
834 844
835 if (visible) 845 if (ret)
836 intel_plane->update_plane(plane, crtc, fb, obj, 846 return ret;
837 crtc_x, crtc_y, crtc_w, crtc_h, 847
838 src_x, src_y, src_w, src_h); 848 intel_plane->crtc_x = orig.crtc_x;
839 else 849 intel_plane->crtc_y = orig.crtc_y;
840 intel_plane->disable_plane(plane, crtc); 850 intel_plane->crtc_w = orig.crtc_w;
851 intel_plane->crtc_h = orig.crtc_h;
852 intel_plane->src_x = orig.src_x;
853 intel_plane->src_y = orig.src_y;
854 intel_plane->src_w = orig.src_w;
855 intel_plane->src_h = orig.src_h;
856 intel_plane->obj = obj;
841 857
842 if (disable_primary) 858 if (intel_crtc->active) {
843 intel_disable_primary(crtc); 859 /*
860 * Be sure to re-enable the primary before the sprite is no longer
861 * covering it fully.
862 */
863 if (!disable_primary)
864 intel_enable_primary(crtc);
865
866 if (visible)
867 intel_plane->update_plane(plane, crtc, fb, obj,
868 crtc_x, crtc_y, crtc_w, crtc_h,
869 src_x, src_y, src_w, src_h);
870 else
871 intel_plane->disable_plane(plane, crtc);
872
873 if (disable_primary)
874 intel_disable_primary(crtc);
875 }
844 876
845 /* Unpin old obj after new one is active to avoid ugliness */ 877 /* Unpin old obj after new one is active to avoid ugliness */
846 if (old_obj) { 878 if (old_obj) {
@@ -850,17 +882,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
850 * wait for vblank to avoid ugliness, we only need to 882 * wait for vblank to avoid ugliness, we only need to
851 * do the pin & ref bookkeeping. 883 * do the pin & ref bookkeeping.
852 */ 884 */
853 if (old_obj != obj) { 885 if (old_obj != obj && intel_crtc->active)
854 mutex_unlock(&dev->struct_mutex); 886 intel_wait_for_vblank(dev, intel_crtc->pipe);
855 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); 887
856 mutex_lock(&dev->struct_mutex); 888 mutex_lock(&dev->struct_mutex);
857 }
858 intel_unpin_fb_obj(old_obj); 889 intel_unpin_fb_obj(old_obj);
890 mutex_unlock(&dev->struct_mutex);
859 } 891 }
860 892
861out_unlock: 893 return 0;
862 mutex_unlock(&dev->struct_mutex);
863 return ret;
864} 894}
865 895
866static int 896static int
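Annotation: the intel_update_plane() rework above also changes when state is committed. The requested coordinates are snapshotted into a local orig struct up front, but only copied into intel_plane after intel_pin_and_fence_fb_obj() has succeeded, so a failed update no longer leaves half-written plane state behind. The generic shape of that pattern, with hypothetical names:

    struct plane_params { int crtc_x, crtc_y, crtc_w, crtc_h; };

    /* Commit the requested parameters only after the one fallible
     * step succeeds; on error the committed state is untouched. */
    static int plane_update(struct plane_params *committed,
                            const struct plane_params *requested,
                            int (*pin)(void))
    {
            int ret = pin();

            if (ret)
                    return ret;

            *committed = *requested;
            return 0;
    }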
@@ -868,7 +898,7 @@ intel_disable_plane(struct drm_plane *plane)
868{ 898{
869 struct drm_device *dev = plane->dev; 899 struct drm_device *dev = plane->dev;
870 struct intel_plane *intel_plane = to_intel_plane(plane); 900 struct intel_plane *intel_plane = to_intel_plane(plane);
871 int ret = 0; 901 struct intel_crtc *intel_crtc;
872 902
873 if (!plane->fb) 903 if (!plane->fb)
874 return 0; 904 return 0;
@@ -876,21 +906,25 @@ intel_disable_plane(struct drm_plane *plane)
876 if (WARN_ON(!plane->crtc)) 906 if (WARN_ON(!plane->crtc))
877 return -EINVAL; 907 return -EINVAL;
878 908
879 intel_enable_primary(plane->crtc); 909 intel_crtc = to_intel_crtc(plane->crtc);
880 intel_plane->disable_plane(plane, plane->crtc);
881 910
882 if (!intel_plane->obj) 911 if (intel_crtc->active) {
883 goto out; 912 intel_enable_primary(plane->crtc);
913 intel_plane->disable_plane(plane, plane->crtc);
914 }
884 915
885 intel_wait_for_vblank(dev, intel_plane->pipe); 916 if (intel_plane->obj) {
917 if (intel_crtc->active)
918 intel_wait_for_vblank(dev, intel_plane->pipe);
886 919
887 mutex_lock(&dev->struct_mutex); 920 mutex_lock(&dev->struct_mutex);
888 intel_unpin_fb_obj(intel_plane->obj); 921 intel_unpin_fb_obj(intel_plane->obj);
889 intel_plane->obj = NULL; 922 mutex_unlock(&dev->struct_mutex);
890 mutex_unlock(&dev->struct_mutex);
891out:
892 923
893 return ret; 924 intel_plane->obj = NULL;
925 }
926
927 return 0;
894} 928}
895 929
896static void intel_destroy_plane(struct drm_plane *plane) 930static void intel_destroy_plane(struct drm_plane *plane)
@@ -921,7 +955,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
921 955
922 obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE); 956 obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
923 if (!obj) { 957 if (!obj) {
924 ret = -EINVAL; 958 ret = -ENOENT;
925 goto out_unlock; 959 goto out_unlock;
926 } 960 }
927 961
@@ -950,7 +984,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
950 984
951 obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE); 985 obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
952 if (!obj) { 986 if (!obj) {
953 ret = -EINVAL; 987 ret = -ENOENT;
954 goto out_unlock; 988 goto out_unlock;
955 } 989 }
956 990
@@ -1034,7 +1068,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1034 if (INTEL_INFO(dev)->gen < 5) 1068 if (INTEL_INFO(dev)->gen < 5)
1035 return -ENODEV; 1069 return -ENODEV;
1036 1070
1037 intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL); 1071 intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
1038 if (!intel_plane) 1072 if (!intel_plane)
1039 return -ENOMEM; 1073 return -ENOMEM;
1040 1074
@@ -1058,6 +1092,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1058 break; 1092 break;
1059 1093
1060 case 7: 1094 case 7:
1095 case 8:
1061 if (IS_IVYBRIDGE(dev)) { 1096 if (IS_IVYBRIDGE(dev)) {
1062 intel_plane->can_scale = true; 1097 intel_plane->can_scale = true;
1063 intel_plane->max_downscale = 2; 1098 intel_plane->max_downscale = 2;
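Annotation: the clip rectangle in intel_update_plane() is now derived from the pipe source size (and collapses to 0x0 when the crtc is inactive), and disable_primary is decided by comparing the clipped destination against that clip: if they are equal, the sprite covers the whole pipe and the primary plane can be switched off to save power. A sketch of just the cover test, using the drm_rect helpers:

    #include <drm/drm_rect.h>

    static bool sprite_covers_pipe(const struct drm_rect *dst,
                                   int pipe_src_w, int pipe_src_h)
    {
            const struct drm_rect clip = {
                    .x2 = pipe_src_w,
                    .y2 = pipe_src_h,
            };

            /* equal rectangles => nothing of the primary shows through */
            return drm_rect_equals(dst, &clip);
    }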
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index dd6f84bf6c22..18c406246a2d 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -912,7 +912,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
912 if (!tv_mode) 912 if (!tv_mode)
913 return false; 913 return false;
914 914
915 pipe_config->adjusted_mode.clock = tv_mode->clock; 915 pipe_config->adjusted_mode.crtc_clock = tv_mode->clock;
916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); 916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
917 pipe_config->pipe_bpp = 8*3; 917 pipe_config->pipe_bpp = 8*3;
918 918
@@ -1044,7 +1044,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
1044 tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT; 1044 tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
1045 1045
1046 /* Enable two fixes for the chips that need them. */ 1046 /* Enable two fixes for the chips that need them. */
1047 if (dev->pci_device < 0x2772) 1047 if (dev->pdev->device < 0x2772)
1048 tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX; 1048 tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
1049 1049
1050 I915_WRITE(TV_H_CTL_1, hctl1); 1050 I915_WRITE(TV_H_CTL_1, hctl1);
@@ -1094,7 +1094,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
1094 unsigned int xsize, ysize; 1094 unsigned int xsize, ysize;
1095 /* Pipe must be off here */ 1095 /* Pipe must be off here */
1096 I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); 1096 I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
1097 intel_flush_display_plane(dev_priv, intel_crtc->plane); 1097 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
1098 1098
1099 /* Wait for vblank for the disable to take effect */ 1099 /* Wait for vblank for the disable to take effect */
1100 if (IS_GEN2(dev)) 1100 if (IS_GEN2(dev))
@@ -1123,7 +1123,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
1123 1123
1124 I915_WRITE(pipeconf_reg, pipeconf); 1124 I915_WRITE(pipeconf_reg, pipeconf);
1125 I915_WRITE(dspcntr_reg, dspcntr); 1125 I915_WRITE(dspcntr_reg, dspcntr);
1126 intel_flush_display_plane(dev_priv, intel_crtc->plane); 1126 intel_flush_primary_plane(dev_priv, intel_crtc->plane);
1127 } 1127 }
1128 1128
1129 j = 0; 1129 j = 0;
@@ -1433,7 +1433,6 @@ intel_tv_get_modes(struct drm_connector *connector)
1433static void 1433static void
1434intel_tv_destroy(struct drm_connector *connector) 1434intel_tv_destroy(struct drm_connector *connector)
1435{ 1435{
1436 drm_sysfs_connector_remove(connector);
1437 drm_connector_cleanup(connector); 1436 drm_connector_cleanup(connector);
1438 kfree(connector); 1437 kfree(connector);
1439} 1438}
@@ -1518,7 +1517,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
1518static int tv_is_present_in_vbt(struct drm_device *dev) 1517static int tv_is_present_in_vbt(struct drm_device *dev)
1519{ 1518{
1520 struct drm_i915_private *dev_priv = dev->dev_private; 1519 struct drm_i915_private *dev_priv = dev->dev_private;
1521 struct child_device_config *p_child; 1520 union child_device_config *p_child;
1522 int i, ret; 1521 int i, ret;
1523 1522
1524 if (!dev_priv->vbt.child_dev_num) 1523 if (!dev_priv->vbt.child_dev_num)
@@ -1530,13 +1529,13 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
1530 /* 1529 /*
1531 * If the device type is not TV, continue. 1530 * If the device type is not TV, continue.
1532 */ 1531 */
1533 if (p_child->device_type != DEVICE_TYPE_INT_TV && 1532 if (p_child->old.device_type != DEVICE_TYPE_INT_TV &&
1534 p_child->device_type != DEVICE_TYPE_TV) 1533 p_child->old.device_type != DEVICE_TYPE_TV)
1535 continue; 1534 continue;
1536 /* The device is regarded as present only when 1535 /* The device is regarded as present only when
1537 * its addin_offset is non-zero. 1536 * its addin_offset is non-zero.
1538 */ 1537 */
1539 if (p_child->addin_offset) { 1538 if (p_child->old.addin_offset) {
1540 ret = 1; 1539 ret = 1;
1541 break; 1540 break;
1542 } 1541 }
@@ -1590,12 +1589,12 @@ intel_tv_init(struct drm_device *dev)
1590 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) 1589 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
1591 return; 1590 return;
1592 1591
1593 intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL); 1592 intel_tv = kzalloc(sizeof(*intel_tv), GFP_KERNEL);
1594 if (!intel_tv) { 1593 if (!intel_tv) {
1595 return; 1594 return;
1596 } 1595 }
1597 1596
1598 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 1597 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
1599 if (!intel_connector) { 1598 if (!intel_connector) {
1600 kfree(intel_tv); 1599 kfree(intel_tv);
1601 return; 1600 return;
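Annotation: the tv_is_present_in_vbt() change above follows a VBT parsing rework in which child_device_config became a union over the legacy and current layouts, so callers must now say which view they mean (p_child->old.device_type rather than p_child->device_type). Roughly, and treating the member names beyond "old" as an assumption:

    union child_device_config {
            struct old_child_dev_config old;        /* legacy layout */
            struct common_child_dev_config common;  /* shared leading fields */
    };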
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8649f1c36b00..f9883ceff946 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -93,7 +93,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
93{ 93{
94 u32 forcewake_ack; 94 u32 forcewake_ack;
95 95
96 if (IS_HASWELL(dev_priv->dev)) 96 if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
97 forcewake_ack = FORCEWAKE_ACK_HSW; 97 forcewake_ack = FORCEWAKE_ACK_HSW;
98 else 98 else
99 forcewake_ack = FORCEWAKE_MT_ACK; 99 forcewake_ack = FORCEWAKE_MT_ACK;
@@ -112,7 +112,8 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
112 DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); 112 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
113 113
114 /* WaRsForcewakeWaitTC0:ivb,hsw */ 114 /* WaRsForcewakeWaitTC0:ivb,hsw */
115 __gen6_gt_wait_for_thread_c0(dev_priv); 115 if (INTEL_INFO(dev_priv->dev)->gen < 8)
116 __gen6_gt_wait_for_thread_c0(dev_priv);
116} 117}
117 118
118static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) 119static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
@@ -204,60 +205,34 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
204 gen6_gt_check_fifodbg(dev_priv); 205 gen6_gt_check_fifodbg(dev_priv);
205} 206}
206 207
207void intel_uncore_early_sanitize(struct drm_device *dev) 208static void gen6_force_wake_work(struct work_struct *work)
208{ 209{
209 struct drm_i915_private *dev_priv = dev->dev_private; 210 struct drm_i915_private *dev_priv =
211 container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
212 unsigned long irqflags;
210 213
211 if (HAS_FPGA_DBG_UNCLAIMED(dev)) 214 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
212 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 215 if (--dev_priv->uncore.forcewake_count == 0)
216 dev_priv->uncore.funcs.force_wake_put(dev_priv);
217 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
213} 218}
214 219
215void intel_uncore_init(struct drm_device *dev) 220void intel_uncore_early_sanitize(struct drm_device *dev)
216{ 221{
217 struct drm_i915_private *dev_priv = dev->dev_private; 222 struct drm_i915_private *dev_priv = dev->dev_private;
218 223
219 if (IS_VALLEYVIEW(dev)) { 224 if (HAS_FPGA_DBG_UNCLAIMED(dev))
220 dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get; 225 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
221 dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
222 } else if (IS_HASWELL(dev)) {
223 dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
224 dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
225 } else if (IS_IVYBRIDGE(dev)) {
226 u32 ecobus;
227
228 /* IVB configs may use multi-threaded forcewake */
229
230 /* A small trick here - if the bios hasn't configured
231 * MT forcewake, and if the device is in RC6, then
232 * force_wake_mt_get will not wake the device and the
233 * ECOBUS read will return zero, which will be
234 * (correctly) interpreted by the test below as MT
235 * forcewake being disabled.
236 */
237 mutex_lock(&dev->struct_mutex);
238 __gen6_gt_force_wake_mt_get(dev_priv);
239 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
240 __gen6_gt_force_wake_mt_put(dev_priv);
241 mutex_unlock(&dev->struct_mutex);
242 226
243 if (ecobus & FORCEWAKE_MT_ENABLE) { 227 if (IS_HASWELL(dev) &&
244 dev_priv->uncore.funcs.force_wake_get = 228 (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
245 __gen6_gt_force_wake_mt_get; 229 /* The docs do not explain exactly how the calculation can be
246 dev_priv->uncore.funcs.force_wake_put = 230 * made. It is somewhat guessable, but for now, it's always
247 __gen6_gt_force_wake_mt_put; 231 * 128MB.
248 } else { 232 * NB: We can't write IDICR yet because we do not have gt funcs
249 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); 233 * set up */
250 DRM_INFO("when using vblank-synced partial screen updates.\n"); 234 dev_priv->ellc_size = 128;
251 dev_priv->uncore.funcs.force_wake_get = 235 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
252 __gen6_gt_force_wake_get;
253 dev_priv->uncore.funcs.force_wake_put =
254 __gen6_gt_force_wake_put;
255 }
256 } else if (IS_GEN6(dev)) {
257 dev_priv->uncore.funcs.force_wake_get =
258 __gen6_gt_force_wake_get;
259 dev_priv->uncore.funcs.force_wake_put =
260 __gen6_gt_force_wake_put;
261 } 236 }
262} 237}
263 238
@@ -276,10 +251,26 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev)
276 251
277void intel_uncore_sanitize(struct drm_device *dev) 252void intel_uncore_sanitize(struct drm_device *dev)
278{ 253{
254 struct drm_i915_private *dev_priv = dev->dev_private;
255 u32 reg_val;
256
279 intel_uncore_forcewake_reset(dev); 257 intel_uncore_forcewake_reset(dev);
280 258
281 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 259 /* BIOS often leaves RC6 enabled, but disable it for hw init */
282 intel_disable_gt_powersave(dev); 260 intel_disable_gt_powersave(dev);
261
262 /* Turn off power gate, required especially for BIOS-less systems */
263 if (IS_VALLEYVIEW(dev)) {
264
265 mutex_lock(&dev_priv->rps.hw_lock);
266 reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
267
268 if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
269 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
270
271 mutex_unlock(&dev_priv->rps.hw_lock);
272
273 }
283} 274}
284 275
285/* 276/*
@@ -292,6 +283,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
292{ 283{
293 unsigned long irqflags; 284 unsigned long irqflags;
294 285
286 if (!dev_priv->uncore.funcs.force_wake_get)
287 return;
288
295 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 289 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
296 if (dev_priv->uncore.forcewake_count++ == 0) 290 if (dev_priv->uncore.forcewake_count++ == 0)
297 dev_priv->uncore.funcs.force_wake_get(dev_priv); 291 dev_priv->uncore.funcs.force_wake_get(dev_priv);
@@ -305,17 +299,22 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
305{ 299{
306 unsigned long irqflags; 300 unsigned long irqflags;
307 301
302 if (!dev_priv->uncore.funcs.force_wake_put)
303 return;
304
308 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 305 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
309 if (--dev_priv->uncore.forcewake_count == 0) 306 if (--dev_priv->uncore.forcewake_count == 0) {
310 dev_priv->uncore.funcs.force_wake_put(dev_priv); 307 dev_priv->uncore.forcewake_count++;
308 mod_delayed_work(dev_priv->wq,
309 &dev_priv->uncore.force_wake_work,
310 1);
311 }
311 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 312 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
312} 313}
313 314
314/* We give fast paths for the really cool registers */ 315/* We give fast paths for the really cool registers */
315#define NEEDS_FORCE_WAKE(dev_priv, reg) \ 316#define NEEDS_FORCE_WAKE(dev_priv, reg) \
316 ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ 317 ((reg) < 0x40000 && (reg) != FORCEWAKE)
317 ((reg) < 0x40000) && \
318 ((reg) != FORCEWAKE))
319 318
320static void 319static void
321ilk_dummy_write(struct drm_i915_private *dev_priv) 320ilk_dummy_write(struct drm_i915_private *dev_priv)
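Annotation: the gen6_gt_force_wake_put() change above is worth spelling out. When the reference count would hit zero, the code does not release forcewake immediately: it bumps the count back to one and schedules gen6_force_wake_work() roughly a jiffy later, so rapid get/put sequences keep the GT awake instead of bouncing it, and the worker drops the final reference under the same uncore lock. The effect, restated:

    /* put() defers the real release to gen6_force_wake_work() */
    if (--dev_priv->uncore.forcewake_count == 0) {
            dev_priv->uncore.forcewake_count++;     /* keep the wakeref */
            mod_delayed_work(dev_priv->wq,
                             &dev_priv->uncore.force_wake_work, 1);
    }

This is also why intel_uncore_fini(), later in this file, gains a flush_delayed_work() before the final sanitize.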
@@ -329,8 +328,7 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
329static void 328static void
330hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) 329hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
331{ 330{
332 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && 331 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
333 (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
334 DRM_ERROR("Unknown unclaimed register before writing to %x\n", 332 DRM_ERROR("Unknown unclaimed register before writing to %x\n",
335 reg); 333 reg);
336 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 334 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
@@ -340,20 +338,43 @@ hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
340static void 338static void
341hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) 339hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
342{ 340{
343 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && 341 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
344 (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
345 DRM_ERROR("Unclaimed write to %x\n", reg); 342 DRM_ERROR("Unclaimed write to %x\n", reg);
346 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 343 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
347 } 344 }
348} 345}
349 346
350#define __i915_read(x) \ 347#define REG_READ_HEADER(x) \
351u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
352 unsigned long irqflags; \ 348 unsigned long irqflags; \
353 u##x val = 0; \ 349 u##x val = 0; \
354 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ 350 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
355 if (dev_priv->info->gen == 5) \ 351
356 ilk_dummy_write(dev_priv); \ 352#define REG_READ_FOOTER \
353 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
354 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
355 return val
356
357#define __gen4_read(x) \
358static u##x \
359gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
360 REG_READ_HEADER(x); \
361 val = __raw_i915_read##x(dev_priv, reg); \
362 REG_READ_FOOTER; \
363}
364
365#define __gen5_read(x) \
366static u##x \
367gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
368 REG_READ_HEADER(x); \
369 ilk_dummy_write(dev_priv); \
370 val = __raw_i915_read##x(dev_priv, reg); \
371 REG_READ_FOOTER; \
372}
373
374#define __gen6_read(x) \
375static u##x \
376gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
377 REG_READ_HEADER(x); \
357 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 378 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
358 if (dev_priv->uncore.forcewake_count == 0) \ 379 if (dev_priv->uncore.forcewake_count == 0) \
359 dev_priv->uncore.funcs.force_wake_get(dev_priv); \ 380 dev_priv->uncore.funcs.force_wake_get(dev_priv); \
@@ -363,28 +384,73 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
363 } else { \ 384 } else { \
364 val = __raw_i915_read##x(dev_priv, reg); \ 385 val = __raw_i915_read##x(dev_priv, reg); \
365 } \ 386 } \
387 REG_READ_FOOTER; \
388}
389
390__gen6_read(8)
391__gen6_read(16)
392__gen6_read(32)
393__gen6_read(64)
394__gen5_read(8)
395__gen5_read(16)
396__gen5_read(32)
397__gen5_read(64)
398__gen4_read(8)
399__gen4_read(16)
400__gen4_read(32)
401__gen4_read(64)
402
403#undef __gen6_read
404#undef __gen5_read
405#undef __gen4_read
406#undef REG_READ_FOOTER
407#undef REG_READ_HEADER
408
409#define REG_WRITE_HEADER \
410 unsigned long irqflags; \
411 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
412 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
413
414#define __gen4_write(x) \
415static void \
416gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
417 REG_WRITE_HEADER; \
418 __raw_i915_write##x(dev_priv, reg, val); \
366 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ 419 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
367 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
368 return val; \
369} 420}
370 421
371__i915_read(8) 422#define __gen5_write(x) \
372__i915_read(16) 423static void \
373__i915_read(32) 424gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
374__i915_read(64) 425 REG_WRITE_HEADER; \
375#undef __i915_read 426 ilk_dummy_write(dev_priv); \
427 __raw_i915_write##x(dev_priv, reg, val); \
428 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
429}
376 430
377#define __i915_write(x) \ 431#define __gen6_write(x) \
378void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \ 432static void \
379 unsigned long irqflags; \ 433gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
380 u32 __fifo_ret = 0; \ 434 u32 __fifo_ret = 0; \
381 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ 435 REG_WRITE_HEADER; \
382 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ 436 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
437 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
438 } \
439 __raw_i915_write##x(dev_priv, reg, val); \
440 if (unlikely(__fifo_ret)) { \
441 gen6_gt_check_fifodbg(dev_priv); \
442 } \
443 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
444}
445
446#define __hsw_write(x) \
447static void \
448hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
449 u32 __fifo_ret = 0; \
450 REG_WRITE_HEADER; \
383 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 451 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
384 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 452 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
385 } \ 453 } \
386 if (dev_priv->info->gen == 5) \
387 ilk_dummy_write(dev_priv); \
388 hsw_unclaimed_reg_clear(dev_priv, reg); \ 454 hsw_unclaimed_reg_clear(dev_priv, reg); \
389 __raw_i915_write##x(dev_priv, reg, val); \ 455 __raw_i915_write##x(dev_priv, reg, val); \
390 if (unlikely(__fifo_ret)) { \ 456 if (unlikely(__fifo_ret)) { \
@@ -393,11 +459,185 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool tr
393 hsw_unclaimed_reg_check(dev_priv, reg); \ 459 hsw_unclaimed_reg_check(dev_priv, reg); \
394 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ 460 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
395} 461}
396__i915_write(8) 462
397__i915_write(16) 463static const u32 gen8_shadowed_regs[] = {
398__i915_write(32) 464 FORCEWAKE_MT,
399__i915_write(64) 465 GEN6_RPNSWREQ,
400#undef __i915_write 466 GEN6_RC_VIDEO_FREQ,
467 RING_TAIL(RENDER_RING_BASE),
468 RING_TAIL(GEN6_BSD_RING_BASE),
469 RING_TAIL(VEBOX_RING_BASE),
470 RING_TAIL(BLT_RING_BASE),
471 /* TODO: Other registers are not yet used */
472};
473
474static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
475{
476 int i;
477 for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
478 if (reg == gen8_shadowed_regs[i])
479 return true;
480
481 return false;
482}
483
484#define __gen8_write(x) \
485static void \
486gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
487 bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
488 REG_WRITE_HEADER; \
489 if (__needs_put) { \
490 dev_priv->uncore.funcs.force_wake_get(dev_priv); \
491 } \
492 __raw_i915_write##x(dev_priv, reg, val); \
493 if (__needs_put) { \
494 dev_priv->uncore.funcs.force_wake_put(dev_priv); \
495 } \
496 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
497}
498
499__gen8_write(8)
500__gen8_write(16)
501__gen8_write(32)
502__gen8_write(64)
503__hsw_write(8)
504__hsw_write(16)
505__hsw_write(32)
506__hsw_write(64)
507__gen6_write(8)
508__gen6_write(16)
509__gen6_write(32)
510__gen6_write(64)
511__gen5_write(8)
512__gen5_write(16)
513__gen5_write(32)
514__gen5_write(64)
515__gen4_write(8)
516__gen4_write(16)
517__gen4_write(32)
518__gen4_write(64)
519
520#undef __gen8_write
521#undef __hsw_write
522#undef __gen6_write
523#undef __gen5_write
524#undef __gen4_write
525#undef REG_WRITE_HEADER
526
527void intel_uncore_init(struct drm_device *dev)
528{
529 struct drm_i915_private *dev_priv = dev->dev_private;
530
531 INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
532 gen6_force_wake_work);
533
534 if (IS_VALLEYVIEW(dev)) {
535 dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
536 dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
537 } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
538 dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
539 dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
540 } else if (IS_IVYBRIDGE(dev)) {
541 u32 ecobus;
542
543 /* IVB configs may use multi-threaded forcewake */
544
545 /* A small trick here - if the bios hasn't configured
546 * MT forcewake, and if the device is in RC6, then
547 * force_wake_mt_get will not wake the device and the
548 * ECOBUS read will return zero. Which will be
549 * (correctly) interpreted by the test below as MT
550 * forcewake being disabled.
551 */
552 mutex_lock(&dev->struct_mutex);
553 __gen6_gt_force_wake_mt_get(dev_priv);
554 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
555 __gen6_gt_force_wake_mt_put(dev_priv);
556 mutex_unlock(&dev->struct_mutex);
557
558 if (ecobus & FORCEWAKE_MT_ENABLE) {
559 dev_priv->uncore.funcs.force_wake_get =
560 __gen6_gt_force_wake_mt_get;
561 dev_priv->uncore.funcs.force_wake_put =
562 __gen6_gt_force_wake_mt_put;
563 } else {
564 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
565 DRM_INFO("when using vblank-synced partial screen updates.\n");
566 dev_priv->uncore.funcs.force_wake_get =
567 __gen6_gt_force_wake_get;
568 dev_priv->uncore.funcs.force_wake_put =
569 __gen6_gt_force_wake_put;
570 }
571 } else if (IS_GEN6(dev)) {
572 dev_priv->uncore.funcs.force_wake_get =
573 __gen6_gt_force_wake_get;
574 dev_priv->uncore.funcs.force_wake_put =
575 __gen6_gt_force_wake_put;
576 }
577
578 switch (INTEL_INFO(dev)->gen) {
579 default:
580 dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
581 dev_priv->uncore.funcs.mmio_writew = gen8_write16;
582 dev_priv->uncore.funcs.mmio_writel = gen8_write32;
583 dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
584 dev_priv->uncore.funcs.mmio_readb = gen6_read8;
585 dev_priv->uncore.funcs.mmio_readw = gen6_read16;
586 dev_priv->uncore.funcs.mmio_readl = gen6_read32;
587 dev_priv->uncore.funcs.mmio_readq = gen6_read64;
588 break;
589 case 7:
590 case 6:
591 if (IS_HASWELL(dev)) {
592 dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
593 dev_priv->uncore.funcs.mmio_writew = hsw_write16;
594 dev_priv->uncore.funcs.mmio_writel = hsw_write32;
595 dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
596 } else {
597 dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
598 dev_priv->uncore.funcs.mmio_writew = gen6_write16;
599 dev_priv->uncore.funcs.mmio_writel = gen6_write32;
600 dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
601 }
602 dev_priv->uncore.funcs.mmio_readb = gen6_read8;
603 dev_priv->uncore.funcs.mmio_readw = gen6_read16;
604 dev_priv->uncore.funcs.mmio_readl = gen6_read32;
605 dev_priv->uncore.funcs.mmio_readq = gen6_read64;
606 break;
607 case 5:
608 dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
609 dev_priv->uncore.funcs.mmio_writew = gen5_write16;
610 dev_priv->uncore.funcs.mmio_writel = gen5_write32;
611 dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
612 dev_priv->uncore.funcs.mmio_readb = gen5_read8;
613 dev_priv->uncore.funcs.mmio_readw = gen5_read16;
614 dev_priv->uncore.funcs.mmio_readl = gen5_read32;
615 dev_priv->uncore.funcs.mmio_readq = gen5_read64;
616 break;
617 case 4:
618 case 3:
619 case 2:
620 dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
621 dev_priv->uncore.funcs.mmio_writew = gen4_write16;
622 dev_priv->uncore.funcs.mmio_writel = gen4_write32;
623 dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
624 dev_priv->uncore.funcs.mmio_readb = gen4_read8;
625 dev_priv->uncore.funcs.mmio_readw = gen4_read16;
626 dev_priv->uncore.funcs.mmio_readl = gen4_read32;
627 dev_priv->uncore.funcs.mmio_readq = gen4_read64;
628 break;
629 }
630}
631
632void intel_uncore_fini(struct drm_device *dev)
633{
634 struct drm_i915_private *dev_priv = dev->dev_private;
635
636 flush_delayed_work(&dev_priv->uncore.force_wake_work);
637
638 /* Paranoia: make sure we have disabled everything before we exit. */
639 intel_uncore_sanitize(dev);
640}
401 641
402static const struct register_whitelist { 642static const struct register_whitelist {
403 uint64_t offset; 643 uint64_t offset;
@@ -445,36 +685,6 @@ int i915_reg_read_ioctl(struct drm_device *dev,
445 return 0; 685 return 0;
446} 686}
447 687
448static int i8xx_do_reset(struct drm_device *dev)
449{
450 struct drm_i915_private *dev_priv = dev->dev_private;
451
452 if (IS_I85X(dev))
453 return -ENODEV;
454
455 I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
456 POSTING_READ(D_STATE);
457
458 if (IS_I830(dev) || IS_845G(dev)) {
459 I915_WRITE(DEBUG_RESET_I830,
460 DEBUG_RESET_DISPLAY |
461 DEBUG_RESET_RENDER |
462 DEBUG_RESET_FULL);
463 POSTING_READ(DEBUG_RESET_I830);
464 msleep(1);
465
466 I915_WRITE(DEBUG_RESET_I830, 0);
467 POSTING_READ(DEBUG_RESET_I830);
468 }
469
470 msleep(1);
471
472 I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
473 POSTING_READ(D_STATE);
474
475 return 0;
476}
477
478static int i965_reset_complete(struct drm_device *dev) 688static int i965_reset_complete(struct drm_device *dev)
479{ 689{
480 u8 gdrst; 690 u8 gdrst;
@@ -576,7 +786,6 @@ int intel_gpu_reset(struct drm_device *dev)
576 case 6: return gen6_do_reset(dev); 786 case 6: return gen6_do_reset(dev);
577 case 5: return ironlake_do_reset(dev); 787 case 5: return ironlake_do_reset(dev);
578 case 4: return i965_do_reset(dev); 788 case 4: return i965_do_reset(dev);
579 case 2: return i8xx_do_reset(dev);
580 default: return -ENODEV; 789 default: return -ENODEV;
581 } 790 }
582} 791}
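Annotation: one more note on the gen8 write path added above. Registers in gen8_shadowed_regs are handled by hardware shadowing and so do not need forcewake; every other write gets an explicit force_wake_get/put pair around the raw MMIO access. The membership test is a linear scan, which is entirely adequate for a seven-entry table; for a much larger table a sorted array with bsearch would do the same job, as in this hypothetical standalone sketch:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    static int cmp_u32(const void *a, const void *b)
    {
            uint32_t x = *(const uint32_t *)a;
            uint32_t y = *(const uint32_t *)b;

            return (x > y) - (x < y);
    }

    static bool is_shadowed(const uint32_t *sorted_table, size_t n,
                            uint32_t reg)
    {
            return bsearch(&reg, sorted_table, n, sizeof(*sorted_table),
                           cmp_u32) != NULL;
    }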
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index cc3166dd445a..087db33f6cff 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -406,11 +406,6 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
406 dev_priv->mmio_base = pci_resource_start(dev->pdev, 1); 406 dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
407 dev_priv->mmio_size = pci_resource_len(dev->pdev, 1); 407 dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
408 408
409 dev->counters += 3;
410 dev->types[6] = _DRM_STAT_IRQ;
411 dev->types[7] = _DRM_STAT_PRIMARY;
412 dev->types[8] = _DRM_STAT_SECONDARY;
413
414 ret = drm_vblank_init(dev, 1); 409 ret = drm_vblank_init(dev, 1);
415 410
416 if (ret) { 411 if (ret) {
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 598c281def0a..2b0ceb8dc11b 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -169,5 +169,5 @@ void mga_driver_irq_uninstall(struct drm_device *dev)
169 /* Disable *all* interrupts */ 169 /* Disable *all* interrupts */
170 MGA_WRITE(MGA_IEN, 0); 170 MGA_WRITE(MGA_IEN, 0);
171 171
172 dev->irq_enabled = 0; 172 dev->irq_enabled = false;
173} 173}
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index b487cdec5ee7..3a1c5fbae54a 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -5,6 +5,7 @@ config DRM_MGAG200
5 select FB_SYS_COPYAREA 5 select FB_SYS_COPYAREA
6 select FB_SYS_IMAGEBLIT 6 select FB_SYS_IMAGEBLIT
7 select DRM_KMS_HELPER 7 select DRM_KMS_HELPER
8 select DRM_KMS_FB_HELPER
8 select DRM_TTM 9 select DRM_TTM
9 help 10 help
10 This is a KMS driver for the MGA G200 server chips, it 11 This is a KMS driver for the MGA G200 server chips, it
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index fcce7b2f8011..f15ea3c4a90a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -99,7 +99,6 @@ static struct drm_driver driver = {
99 .minor = DRIVER_MINOR, 99 .minor = DRIVER_MINOR,
100 .patchlevel = DRIVER_PATCHLEVEL, 100 .patchlevel = DRIVER_PATCHLEVEL,
101 101
102 .gem_init_object = mgag200_gem_init_object,
103 .gem_free_object = mgag200_gem_free_object, 102 .gem_free_object = mgag200_gem_free_object,
104 .dumb_create = mgag200_dumb_create, 103 .dumb_create = mgag200_dumb_create,
105 .dumb_map_offset = mgag200_dumb_mmap_offset, 104 .dumb_map_offset = mgag200_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index baaae19332e2..cf11ee68a6d9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -260,7 +260,6 @@ int mgag200_driver_unload(struct drm_device *dev);
260int mgag200_gem_create(struct drm_device *dev, 260int mgag200_gem_create(struct drm_device *dev,
261 u32 size, bool iskernel, 261 u32 size, bool iskernel,
262 struct drm_gem_object **obj); 262 struct drm_gem_object **obj);
263int mgag200_gem_init_object(struct drm_gem_object *obj);
264int mgag200_dumb_create(struct drm_file *file, 263int mgag200_dumb_create(struct drm_file *file,
265 struct drm_device *dev, 264 struct drm_device *dev,
266 struct drm_mode_create_dumb *args); 265 struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 0f8b861b10b3..b1120cb1db6d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -310,12 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
310 return 0; 310 return 0;
311} 311}
312 312
313int mgag200_gem_init_object(struct drm_gem_object *obj)
314{
315 BUG();
316 return 0;
317}
318
319void mgag200_bo_unref(struct mgag200_bo **bo) 313void mgag200_bo_unref(struct mgag200_bo **bo)
320{ 314{
321 struct ttm_buffer_object *tbo; 315 struct ttm_buffer_object *tbo;
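
The mgag200 changes above delete a gem_init_object() hook whose only body was BUG(), and drop the matching .gem_init_object entry from the driver struct, which suggests the callback has become optional for drivers with no per-object setup. A minimal sketch of that optional-callback pattern, assuming the caller tolerates a NULL hook (illustrative names, not the DRM core's):

#include <stdio.h>

struct gem_object { int handle; };

struct driver_ops {
	/* Optional: may be NULL when the driver needs no per-object setup. */
	int (*gem_init_object)(struct gem_object *obj);
	void (*gem_free_object)(struct gem_object *obj);
};

static void gem_free(struct gem_object *obj) { printf("freed %d\n", obj->handle); }

static const struct driver_ops mgag200_like = {
	.gem_init_object = NULL,	/* hook dropped, as in the diff */
	.gem_free_object = gem_free,
};

static int gem_object_init(const struct driver_ops *ops, struct gem_object *obj)
{
	/* Caller guards the optional hook instead of forcing a BUG() stub. */
	if (ops->gem_init_object)
		return ops->gem_init_object(obj);
	return 0;
}

int main(void)
{
	struct gem_object obj = { .handle = 1 };
	if (gem_object_init(&mgag200_like, &obj) == 0)
		mgag200_like.gem_free_object(&obj);
	return 0;
}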
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 503a414cbdad..ee6ed633b7b1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -765,8 +765,6 @@ static int mga_crtc_do_set_base(struct drm_crtc *crtc,
765 } 765 }
766 mgag200_bo_unreserve(bo); 766 mgag200_bo_unreserve(bo);
767 767
768 DRM_INFO("mga base %llx\n", gpu_addr);
769
770 mga_set_start_address(crtc, (u32)gpu_addr); 768 mga_set_start_address(crtc, (u32)gpu_addr);
771 769
772 return 0; 770 return 0;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index a06c19cc56f8..f39ab7554fc9 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -14,6 +14,7 @@ config DRM_MSM
14config DRM_MSM_FBDEV 14config DRM_MSM_FBDEV
15 bool "Enable legacy fbdev support for MSM modesetting driver" 15 bool "Enable legacy fbdev support for MSM modesetting driver"
16 depends on DRM_MSM 16 depends on DRM_MSM
17 select DRM_KMS_FB_HELPER
17 select FB_SYS_FILLRECT 18 select FB_SYS_FILLRECT
18 select FB_SYS_COPYAREA 19 select FB_SYS_COPYAREA
19 select FB_SYS_IMAGEBLIT 20 select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index e17914889e54..e5fa12b0d21e 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -21,6 +21,7 @@ msm-y := \
21 msm_drv.o \ 21 msm_drv.o \
22 msm_fb.o \ 22 msm_fb.o \
23 msm_gem.o \ 23 msm_gem.o \
24 msm_gem_prime.o \
24 msm_gem_submit.o \ 25 msm_gem_submit.o \
25 msm_gpu.o \ 26 msm_gpu.o \
26 msm_ringbuffer.o 27 msm_ringbuffer.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 35463864b959..9588098741b5 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -4,16 +4,16 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48) 13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05) 16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
17 17
18Copyright (C) 2013 by the following authors: 18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark) 19- Rob Clark <robdclark@gmail.com> (robclark)
@@ -317,6 +317,38 @@ static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val)
317#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000 317#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000
318#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000 318#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000
319 319
320#define REG_A2XX_MH_ARBITER_CONFIG 0x00000a40
321#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK 0x0000003f
322#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT 0
323static inline uint32_t A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(uint32_t val)
324{
325 return ((val) << A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK;
326}
327#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_GRANULARITY 0x00000040
328#define A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE 0x00000080
329#define A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE 0x00000100
330#define A2XX_MH_ARBITER_CONFIG_L2_ARB_CONTROL 0x00000200
331#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK 0x00001c00
332#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT 10
333static inline uint32_t A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(uint32_t val)
334{
335 return ((val) << A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT) & A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK;
336}
337#define A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE 0x00002000
338#define A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE 0x00004000
339#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE 0x00008000
340#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK 0x003f0000
341#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT 16
342static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
343{
344 return ((val) << A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK;
345}
346#define A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE 0x00400000
347#define A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE 0x00800000
348#define A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE 0x01000000
349#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000
350#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000
351
320#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01 352#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
321#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f 353#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
322#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0 354#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
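
The block of REG_A2XX_MH_ARBITER_CONFIG definitions added above follows the rules-ng-ng convention: every multi-bit field gets a __MASK/__SHIFT pair plus an inline packer that shifts and masks the value, while single-bit flags are plain defines. A self-contained sketch of how such a register word is typically composed; the field helpers are copied from the hunk, but the OR-together usage is the conventional pattern, not code from this patch:

#include <stdint.h>
#include <stdio.h>

/* Field helpers as generated above (subset). */
#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK	0x0000003f
#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT	0
static inline uint32_t A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(uint32_t val)
{
	return (val << A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT) &
	       A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK;
}
#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK	0x003f0000
#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT	16
static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
{
	return (val << A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT) &
	       A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK;
}
#define A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE		0x00000080

int main(void)
{
	/* Single-bit flags OR in directly; multi-bit fields go through packers. */
	uint32_t cfg = A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(16) |
		       A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
		       A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(8);

	printf("MH_ARBITER_CONFIG = 0x%08x\n", cfg);	/* 0x00080090 */
	return 0;
}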
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index d183516067b4..d4afdf657559 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -4,16 +4,16 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48) 13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05) 16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
17 17
18Copyright (C) 2013 by the following authors: 18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark) 19- Rob Clark <robdclark@gmail.com> (robclark)
@@ -637,11 +637,12 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
637#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070 637#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
638#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001 638#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
639#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002 639#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
640#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007fc 640#define A3XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
641#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 2 641#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
642static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(uint32_t val) 642#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
643static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
643{ 644{
644 return ((val) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK; 645 return ((((uint32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
645} 646}
646#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800 647#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
647 648
@@ -745,6 +746,7 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
745} 746}
746#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000 747#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
747#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000 748#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
749#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
748#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000 750#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
749#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24 751#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
750static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val) 752static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
@@ -767,7 +769,19 @@ static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val)
767 return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK; 769 return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
768} 770}
769 771
770#define REG_A3XX_UNKNOWN_20C3 0x000020c3 772#define REG_A3XX_RB_ALPHA_REF 0x000020c3
773#define A3XX_RB_ALPHA_REF_UINT__MASK 0x0000ff00
774#define A3XX_RB_ALPHA_REF_UINT__SHIFT 8
775static inline uint32_t A3XX_RB_ALPHA_REF_UINT(uint32_t val)
776{
777 return ((val) << A3XX_RB_ALPHA_REF_UINT__SHIFT) & A3XX_RB_ALPHA_REF_UINT__MASK;
778}
779#define A3XX_RB_ALPHA_REF_FLOAT__MASK 0xffff0000
780#define A3XX_RB_ALPHA_REF_FLOAT__SHIFT 16
781static inline uint32_t A3XX_RB_ALPHA_REF_FLOAT(float val)
782{
783 return ((util_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK;
784}
771 785
772static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; } 786static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
773 787
@@ -1002,7 +1016,7 @@ static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endi
1002#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100 1016#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
1003#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002 1017#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
1004#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004 1018#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
1005#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_ENABLE 0x00000008 1019#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008
1006#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070 1020#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
1007#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4 1021#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
1008static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val) 1022static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
@@ -1038,7 +1052,8 @@ static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
1038 1052
1039#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104 1053#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
1040#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001 1054#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
1041#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000004 1055#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
1056#define A3XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
1042#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700 1057#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
1043#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8 1058#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
1044static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val) 1059static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
@@ -2074,6 +2089,7 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
2074#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09 2089#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
2075 2090
2076#define REG_A3XX_TEX_SAMP_0 0x00000000 2091#define REG_A3XX_TEX_SAMP_0 0x00000000
2092#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002
2077#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c 2093#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
2078#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2 2094#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
2079static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val) 2095static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)
@@ -2134,6 +2150,12 @@ static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val)
2134{ 2150{
2135 return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK; 2151 return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
2136} 2152}
2153#define A3XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
2154#define A3XX_TEX_CONST_0_MIPLVLS__SHIFT 16
2155static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val)
2156{
2157 return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
2158}
2137#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000 2159#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
2138#define A3XX_TEX_CONST_0_FMT__SHIFT 22 2160#define A3XX_TEX_CONST_0_FMT__SHIFT 22
2139static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val) 2161static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
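
Note the LINEHALFWIDTH change in the a3xx hunk above: the field moves to shift 3 and its packer now takes a float scaled by 4.0, i.e. the hardware stores the line half-width as unsigned fixed point with two fractional bits (the same hunk also retypes the former UNKNOWN_20C3 register as REG_A3XX_RB_ALPHA_REF with packed uint and half-float reference fields). A standalone sketch of the fixed-point conversion, with the packer copied from the hunk and illustrative sample values:

#include <stdint.h>
#include <stdio.h>

#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK	0x000007f8
#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT	3
static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
{
	/* Two fractional bits: a half-width of 1.0 pixel encodes as raw 4. */
	return (((uint32_t)(val * 4.0)) <<
		A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) &
	       A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
}

int main(void)
{
	/* A 3-pixel-wide line has a half-width of 1.5 -> raw field value 6. */
	printf("0x%08x\n", A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(1.5f));	/* 0x30 */
	return 0;
}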
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 61979d458ac0..33dcc606c7c5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -4,16 +4,16 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48) 13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05) 16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
17 17
18Copyright (C) 2013 by the following authors: 18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark) 19- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 94c13f418e75..259ad709b0cc 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -4,16 +4,16 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48) 13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05) 16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
17 17
18Copyright (C) 2013 by the following authors: 18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark) 19- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 6f8396be431d..6d4c62bf70dc 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -4,13 +4,13 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index aefc1b8feae9..d1df38bf5747 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -4,13 +4,13 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index a225e8170b2a..0030a111302d 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -4,13 +4,13 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index f5fa4865e059..4e939f82918c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -4,13 +4,13 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index bee36363bcd0..dbde4f6339b9 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -4,13 +4,13 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
index bbeeebe2db55..9908ffe1c3ad 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
@@ -4,13 +4,13 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
@@ -42,28 +42,28 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/ 42*/
43 43
44 44
45enum mpd4_bpc { 45enum mdp4_bpc {
46 BPC1 = 0, 46 BPC1 = 0,
47 BPC5 = 1, 47 BPC5 = 1,
48 BPC6 = 2, 48 BPC6 = 2,
49 BPC8 = 3, 49 BPC8 = 3,
50}; 50};
51 51
52enum mpd4_bpc_alpha { 52enum mdp4_bpc_alpha {
53 BPC1A = 0, 53 BPC1A = 0,
54 BPC4A = 1, 54 BPC4A = 1,
55 BPC6A = 2, 55 BPC6A = 2,
56 BPC8A = 3, 56 BPC8A = 3,
57}; 57};
58 58
59enum mpd4_alpha_type { 59enum mdp4_alpha_type {
60 FG_CONST = 0, 60 FG_CONST = 0,
61 BG_CONST = 1, 61 BG_CONST = 1,
62 FG_PIXEL = 2, 62 FG_PIXEL = 2,
63 BG_PIXEL = 3, 63 BG_PIXEL = 3,
64}; 64};
65 65
66enum mpd4_pipe { 66enum mdp4_pipe {
67 VG1 = 0, 67 VG1 = 0,
68 VG2 = 1, 68 VG2 = 1,
69 RGB1 = 2, 69 RGB1 = 2,
@@ -73,13 +73,13 @@ enum mpd4_pipe {
73 VG4 = 6, 73 VG4 = 6,
74}; 74};
75 75
76enum mpd4_mixer { 76enum mdp4_mixer {
77 MIXER0 = 0, 77 MIXER0 = 0,
78 MIXER1 = 1, 78 MIXER1 = 1,
79 MIXER2 = 2, 79 MIXER2 = 2,
80}; 80};
81 81
82enum mpd4_mixer_stage_id { 82enum mdp4_mixer_stage_id {
83 STAGE_UNUSED = 0, 83 STAGE_UNUSED = 0,
84 STAGE_BASE = 1, 84 STAGE_BASE = 1,
85 STAGE0 = 2, 85 STAGE0 = 2,
@@ -194,56 +194,56 @@ static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
194#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0 194#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
195#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007 195#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
196#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0 196#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
197static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val) 197static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
198{ 198{
199 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK; 199 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
200} 200}
201#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008 201#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
202#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070 202#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
203#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4 203#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
204static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val) 204static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
205{ 205{
206 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK; 206 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
207} 207}
208#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080 208#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
209#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700 209#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
210#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8 210#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
211static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val) 211static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
212{ 212{
213 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK; 213 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
214} 214}
215#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800 215#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
216#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000 216#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
217#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12 217#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
218static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val) 218static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
219{ 219{
220 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK; 220 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
221} 221}
222#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000 222#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
223#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000 223#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
224#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16 224#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
225static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val) 225static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
226{ 226{
227 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK; 227 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
228} 228}
229#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000 229#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
230#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000 230#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
231#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20 231#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
232static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val) 232static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
233{ 233{
234 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK; 234 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
235} 235}
236#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000 236#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
237#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000 237#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
238#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24 238#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
239static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val) 239static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
240{ 240{
241 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK; 241 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
242} 242}
243#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000 243#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
244#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000 244#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
245#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28 245#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
246static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val) 246static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
247{ 247{
248 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK; 248 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
249} 249}
@@ -254,56 +254,56 @@ static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id va
254#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100 254#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
255#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007 255#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
256#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0 256#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
257static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val) 257static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
258{ 258{
259 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK; 259 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
260} 260}
261#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008 261#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
262#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070 262#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
263#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4 263#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
264static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val) 264static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
265{ 265{
266 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK; 266 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
267} 267}
268#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080 268#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
269#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700 269#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
270#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8 270#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
271static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val) 271static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
272{ 272{
273 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK; 273 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
274} 274}
275#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800 275#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
276#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000 276#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
277#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12 277#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
278static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val) 278static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
279{ 279{
280 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK; 280 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
281} 281}
282#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000 282#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
283#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000 283#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
284#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16 284#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
285static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val) 285static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
286{ 286{
287 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK; 287 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
288} 288}
289#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000 289#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
290#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000 290#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
291#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20 291#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
292static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val) 292static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
293{ 293{
294 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK; 294 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
295} 295}
296#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000 296#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
297#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000 297#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
298#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24 298#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
299static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val) 299static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
300{ 300{
301 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK; 301 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
302} 302}
303#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000 303#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
304#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000 304#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
305#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28 305#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
306static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val) 306static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
307{ 307{
308 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK; 308 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
309} 309}
@@ -369,7 +369,7 @@ static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x
369static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } 369static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
370#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003 370#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
371#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0 371#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
372static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val) 372static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val)
373{ 373{
374 return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK; 374 return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
375} 375}
@@ -377,7 +377,7 @@ static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val)
377#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008 377#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
378#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030 378#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
379#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4 379#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
380static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mpd4_alpha_type val) 380static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp4_alpha_type val)
381{ 381{
382 return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK; 382 return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
383} 383}
@@ -472,19 +472,19 @@ static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __of
472static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } 472static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
473#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003 473#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
474#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0 474#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
475static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mpd4_bpc val) 475static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp4_bpc val)
476{ 476{
477 return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK; 477 return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
478} 478}
479#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c 479#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
480#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2 480#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
481static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mpd4_bpc val) 481static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp4_bpc val)
482{ 482{
483 return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK; 483 return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
484} 484}
485#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030 485#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
486#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4 486#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
487static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mpd4_bpc val) 487static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp4_bpc val)
488{ 488{
489 return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK; 489 return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
490} 490}
@@ -601,9 +601,9 @@ static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) {
601 601
602static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; } 602static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
603 603
604static inline uint32_t REG_MDP4_PIPE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; } 604static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
605 605
606static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; } 606static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
607#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 607#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
608#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 608#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
609static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val) 609static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
@@ -617,7 +617,7 @@ static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
617 return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK; 617 return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
618} 618}
619 619
620static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mpd4_pipe i0) { return 0x00020004 + 0x10000*i0; } 620static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; }
621#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000 621#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000
622#define MDP4_PIPE_SRC_XY_Y__SHIFT 16 622#define MDP4_PIPE_SRC_XY_Y__SHIFT 16
623static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val) 623static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
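
The REG_MDP4_PIPE* helpers in the hunks above encode the MDP4 pipe register layout: each pipe instance occupies a 0x10000-byte bank starting at 0x00020000, so any per-pipe register address is just its offset plus 0x10000 times the pipe index. A small sketch of how the generated accessors compose, with the helpers copied from the hunks above and a subset of the mdp4_pipe enum defined earlier in this file:

#include <stdint.h>
#include <stdio.h>

enum mdp4_pipe { VG1 = 0, VG2 = 1, RGB1 = 2 };	/* subset of the enum above */

/* Helpers copied from the hunks above: offset + 0x10000-byte bank per pipe. */
static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0)
{
	return 0x00020000 + 0x10000 * i0;
}
static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0)
{
	return 0x00020004 + 0x10000 * i0;
}

int main(void)
{
	/* Same register, different pipe: only the bank offset changes. */
	printf("VG1  SRC_SIZE at 0x%08x\n", REG_MDP4_PIPE_SRC_SIZE(VG1));	/* 0x00020000 */
	printf("VG1  SRC_XY   at 0x%08x\n", REG_MDP4_PIPE_SRC_XY(VG1));	/* 0x00020004 */
	printf("RGB1 SRC_XY   at 0x%08x\n", REG_MDP4_PIPE_SRC_XY(RGB1));	/* 0x00040004 */
	return 0;
}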
@@ -631,7 +631,7 @@ static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
631 return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK; 631 return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
632} 632}
633 633
634static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mpd4_pipe i0) { return 0x00020008 + 0x10000*i0; } 634static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; }
635#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000 635#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000
636#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16 636#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16
637static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val) 637static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
@@ -645,7 +645,7 @@ static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
645 return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK; 645 return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
646} 646}
647 647
648static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mpd4_pipe i0) { return 0x0002000c + 0x10000*i0; } 648static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; }
649#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000 649#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000
650#define MDP4_PIPE_DST_XY_Y__SHIFT 16 650#define MDP4_PIPE_DST_XY_Y__SHIFT 16
651static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val) 651static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
@@ -659,13 +659,13 @@ static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
659 return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK; 659 return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
660} 660}
661 661
662static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mpd4_pipe i0) { return 0x00020010 + 0x10000*i0; } 662static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; }
663 663
664static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mpd4_pipe i0) { return 0x00020014 + 0x10000*i0; } 664static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; }
665 665
666static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mpd4_pipe i0) { return 0x00020018 + 0x10000*i0; } 666static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; }
667 667
668static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mpd4_pipe i0) { return 0x00020040 + 0x10000*i0; } 668static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; }
669#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff 669#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
670#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0 670#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
671static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val) 671static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
@@ -679,7 +679,7 @@ static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
679 return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK; 679 return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
680} 680}
681 681
682static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mpd4_pipe i0) { return 0x00020044 + 0x10000*i0; } 682static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; }
683#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff 683#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
684#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0 684#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0
685static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val) 685static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
@@ -693,7 +693,7 @@ static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
693 return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK; 693 return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
694} 694}
695 695
696static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mpd4_pipe i0) { return 0x00020048 + 0x10000*i0; } 696static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; }
697#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000 697#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
698#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16 698#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16
699static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val) 699static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
@@ -707,28 +707,28 @@ static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
707 return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK; 707 return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
708} 708}
709 709
710static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mpd4_pipe i0) { return 0x00020050 + 0x10000*i0; } 710static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
711#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 711#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
712#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 712#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
713static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mpd4_bpc val) 713static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp4_bpc val)
714{ 714{
715 return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK; 715 return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
716} 716}
717#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c 717#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
718#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 718#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
719static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mpd4_bpc val) 719static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp4_bpc val)
720{ 720{
721 return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK; 721 return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
722} 722}
723#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 723#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
724#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 724#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
725static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mpd4_bpc val) 725static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp4_bpc val)
726{ 726{
727 return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK; 727 return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
728} 728}
729#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 729#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
730#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 730#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
731static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mpd4_bpc_alpha val) 731static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp4_bpc_alpha val)
732{ 732{
733 return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK; 733 return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
734} 734}
@@ -750,7 +750,7 @@ static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
750#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 750#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
751#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000 751#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000
752 752
753static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mpd4_pipe i0) { return 0x00020054 + 0x10000*i0; } 753static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; }
754#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff 754#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
755#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0 755#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
756static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val) 756static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
@@ -776,7 +776,7 @@ static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
776 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK; 776 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
777} 777}
778 778
779static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020058 + 0x10000*i0; } 779static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; }
780#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001 780#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
781#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002 781#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
782#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200 782#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
@@ -789,36 +789,36 @@ static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020
789#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000 789#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000
790#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000 790#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000
791 791
792static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mpd4_pipe i0) { return 0x0002005c + 0x10000*i0; } 792static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; }
793 793
794static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mpd4_pipe i0) { return 0x00020060 + 0x10000*i0; } 794static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; }
795 795
796static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mpd4_pipe i0) { return 0x00021004 + 0x10000*i0; } 796static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; }
797 797
798static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mpd4_pipe i0) { return 0x00021008 + 0x10000*i0; } 798static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; }
799 799
800static inline uint32_t REG_MDP4_PIPE_CSC(enum mpd4_pipe i0) { return 0x00024000 + 0x10000*i0; } 800static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; }
801 801
802 802
803static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; } 803static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
804 804
805static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; } 805static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
806 806
807static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; } 807static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
808 808
809static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; } 809static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
810 810
811static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; } 811static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
812 812
813static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; } 813static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
814 814
815static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; } 815static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
816 816
817static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; } 817static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
818 818
819static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; } 819static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
820 820
821static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; } 821static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
822 822
823#define REG_MDP4_LCDC 0x000c0000 823#define REG_MDP4_LCDC 0x000c0000
824 824
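
The mdp4.xml.h hunks above change nothing but the misspelled `mpd4_*` enum names, now `mdp4_*`; the register offsets and bitfields are untouched. The header itself (judging by the .xml.h suffix, generated from an XML register description) follows one idiom throughout: per-pipe registers fold the pipe index into the address (base + 0x10000*i0), and bitfield helpers shift a value into place and mask it. A minimal standalone sketch of that idiom — every name below is illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* hypothetical bitfield: 16-bit HEIGHT in bits [31:16] of a size register */
#define SIZE_HEIGHT__MASK  0xffff0000
#define SIZE_HEIGHT__SHIFT 16

static inline uint32_t SIZE_HEIGHT(uint32_t val)
{
	/* shift into position, then mask to guard against overwide values */
	return (val << SIZE_HEIGHT__SHIFT) & SIZE_HEIGHT__MASK;
}

/* hypothetical per-instance register: 0x10000 of address space per pipe */
static inline uint32_t REG_SIZE(unsigned pipe)
{
	return 0x00020008 + 0x10000 * pipe;
}

int main(void)
{
	/* pipe 2, height 1080: offset 0x00040008, value 0x04380000 */
	printf("reg=%08x val=%08x\n",
	       (unsigned)REG_SIZE(2), (unsigned)SIZE_HEIGHT(1080));
	return 0;
}
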
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
index de6bea297cda..019d530187ff 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
@@ -26,6 +26,7 @@ struct mdp4_crtc {
26 struct drm_crtc base; 26 struct drm_crtc base;
27 char name[8]; 27 char name[8];
28 struct drm_plane *plane; 28 struct drm_plane *plane;
29 struct drm_plane *planes[8];
29 int id; 30 int id;
30 int ovlp; 31 int ovlp;
31 enum mdp4_dma dma; 32 enum mdp4_dma dma;
@@ -50,7 +51,11 @@ struct mdp4_crtc {
50 51
51 /* if there is a pending flip, these will be non-null: */ 52 /* if there is a pending flip, these will be non-null: */
52 struct drm_pending_vblank_event *event; 53 struct drm_pending_vblank_event *event;
53 struct work_struct pageflip_work; 54 struct msm_fence_cb pageflip_cb;
55
56#define PENDING_CURSOR 0x1
57#define PENDING_FLIP 0x2
58 atomic_t pending;
54 59
55 /* the fb that we currently hold a scanout ref to: */ 60 /* the fb that we currently hold a scanout ref to: */
56 struct drm_framebuffer *fb; 61 struct drm_framebuffer *fb;
@@ -92,7 +97,8 @@ static void update_fb(struct drm_crtc *crtc, bool async,
92 } 97 }
93} 98}
94 99
95static void complete_flip(struct drm_crtc *crtc, bool canceled) 100/* if file!=NULL, this is preclose potential cancel-flip path */
101static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
96{ 102{
97 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 103 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
98 struct drm_device *dev = crtc->dev; 104 struct drm_device *dev = crtc->dev;
@@ -102,11 +108,14 @@ static void complete_flip(struct drm_crtc *crtc, bool canceled)
102 spin_lock_irqsave(&dev->event_lock, flags); 108 spin_lock_irqsave(&dev->event_lock, flags);
103 event = mdp4_crtc->event; 109 event = mdp4_crtc->event;
104 if (event) { 110 if (event) {
105 mdp4_crtc->event = NULL; 111 /* if regular vblank case (!file) or if cancel-flip from
106 if (canceled) 112 * preclose on file that requested flip, then send the
107 event->base.destroy(&event->base); 113 * event:
108 else 114 */
115 if (!file || (event->base.file_priv == file)) {
116 mdp4_crtc->event = NULL;
109 drm_send_vblank_event(dev, mdp4_crtc->id, event); 117 drm_send_vblank_event(dev, mdp4_crtc->id, event);
118 }
110 } 119 }
111 spin_unlock_irqrestore(&dev->event_lock, flags); 120 spin_unlock_irqrestore(&dev->event_lock, flags);
112} 121}
@@ -115,9 +124,15 @@ static void crtc_flush(struct drm_crtc *crtc)
115{ 124{
116 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 125 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
117 struct mdp4_kms *mdp4_kms = get_kms(crtc); 126 struct mdp4_kms *mdp4_kms = get_kms(crtc);
118 uint32_t flush = 0; 127 uint32_t i, flush = 0;
119 128
120 flush |= pipe2flush(mdp4_plane_pipe(mdp4_crtc->plane)); 129 for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
130 struct drm_plane *plane = mdp4_crtc->planes[i];
131 if (plane) {
132 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
133 flush |= pipe2flush(pipe_id);
134 }
135 }
121 flush |= ovlp2flush(mdp4_crtc->ovlp); 136 flush |= ovlp2flush(mdp4_crtc->ovlp);
122 137
123 DBG("%s: flush=%08x", mdp4_crtc->name, flush); 138 DBG("%s: flush=%08x", mdp4_crtc->name, flush);
@@ -125,17 +140,29 @@ static void crtc_flush(struct drm_crtc *crtc)
125 mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); 140 mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
126} 141}
127 142
128static void pageflip_worker(struct work_struct *work) 143static void request_pending(struct drm_crtc *crtc, uint32_t pending)
144{
145 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
146
147 atomic_or(pending, &mdp4_crtc->pending);
148 mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
149}
150
151static void pageflip_cb(struct msm_fence_cb *cb)
129{ 152{
130 struct mdp4_crtc *mdp4_crtc = 153 struct mdp4_crtc *mdp4_crtc =
131 container_of(work, struct mdp4_crtc, pageflip_work); 154 container_of(cb, struct mdp4_crtc, pageflip_cb);
132 struct drm_crtc *crtc = &mdp4_crtc->base; 155 struct drm_crtc *crtc = &mdp4_crtc->base;
156 struct drm_framebuffer *fb = crtc->fb;
133 157
134 mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb); 158 if (!fb)
159 return;
160
161 mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
135 crtc_flush(crtc); 162 crtc_flush(crtc);
136 163
137 /* enable vblank to complete flip: */ 164 /* enable vblank to complete flip: */
138 mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank); 165 request_pending(crtc, PENDING_FLIP);
139} 166}
140 167
141static void unref_fb_worker(struct drm_flip_work *work, void *val) 168static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -205,67 +232,69 @@ static void blend_setup(struct drm_crtc *crtc)
205 struct mdp4_kms *mdp4_kms = get_kms(crtc); 232 struct mdp4_kms *mdp4_kms = get_kms(crtc);
206 int i, ovlp = mdp4_crtc->ovlp; 233 int i, ovlp = mdp4_crtc->ovlp;
207 uint32_t mixer_cfg = 0; 234 uint32_t mixer_cfg = 0;
208 235 static const enum mdp4_mixer_stage_id stages[] = {
209 /* 236 STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
210 * This probably would also need to be triggered by any attached 237 };
211 * plane when it changes.. for now since we are only using a single 238 /* statically (for now) map planes to mixer stage (z-order): */
212 * private plane, the configuration is hard-coded: 239 static const int idxs[] = {
213 */ 240 [VG1] = 1,
241 [VG2] = 2,
242 [RGB1] = 0,
243 [RGB2] = 0,
244 [RGB3] = 0,
245 [VG3] = 3,
246 [VG4] = 4,
247
248 };
249 bool alpha[4]= { false, false, false, false };
214 250
215 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0); 251 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
216 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0); 252 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
217 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0); 253 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
218 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0); 254 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
219 255
256 /* TODO single register for all CRTCs, so this won't work properly
257 * when multiple CRTCs are active..
258 */
259 for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
260 struct drm_plane *plane = mdp4_crtc->planes[i];
261 if (plane) {
262 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
263 int idx = idxs[pipe_id];
264 if (idx > 0) {
265 const struct mdp4_format *format =
266 to_mdp4_format(msm_framebuffer_format(plane->fb));
267 alpha[idx-1] = format->alpha_enable;
268 }
269 mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]);
270 }
271 }
272
273 /* this shouldn't happen.. and seems to cause underflow: */
274 WARN_ON(!mixer_cfg);
275
220 for (i = 0; i < 4; i++) { 276 for (i = 0; i < 4; i++) {
221 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0); 277 uint32_t op;
222 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0); 278
223 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), 279 if (alpha[i]) {
224 MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) | 280 op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
225 MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST)); 281 MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
226 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 0); 282 MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
283 } else {
284 op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
285 MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
286 }
287
288 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
289 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
290 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
291 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
227 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0); 292 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
228 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0); 293 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
229 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0); 294 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
230 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0); 295 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
231 } 296 }
232 297
233 /* TODO single register for all CRTCs, so this won't work properly
234 * when multiple CRTCs are active..
235 */
236 switch (mdp4_plane_pipe(mdp4_crtc->plane)) {
237 case VG1:
238 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(STAGE_BASE) |
239 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
240 break;
241 case VG2:
242 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(STAGE_BASE) |
243 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
244 break;
245 case RGB1:
246 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(STAGE_BASE) |
247 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
248 break;
249 case RGB2:
250 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(STAGE_BASE) |
251 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
252 break;
253 case RGB3:
254 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(STAGE_BASE) |
255 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
256 break;
257 case VG3:
258 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(STAGE_BASE) |
259 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
260 break;
261 case VG4:
262 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(STAGE_BASE) |
263 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
264 break;
265 default:
266 WARN_ON("invalid pipe");
267 break;
268 }
269 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg); 298 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
270} 299}
271 300
@@ -377,6 +406,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
377 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 406 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
378 struct drm_device *dev = crtc->dev; 407 struct drm_device *dev = crtc->dev;
379 struct drm_gem_object *obj; 408 struct drm_gem_object *obj;
409 unsigned long flags;
380 410
381 if (mdp4_crtc->event) { 411 if (mdp4_crtc->event) {
382 dev_err(dev->dev, "already pending flip!\n"); 412 dev_err(dev->dev, "already pending flip!\n");
@@ -385,11 +415,13 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
385 415
386 obj = msm_framebuffer_bo(new_fb, 0); 416 obj = msm_framebuffer_bo(new_fb, 0);
387 417
418 spin_lock_irqsave(&dev->event_lock, flags);
388 mdp4_crtc->event = event; 419 mdp4_crtc->event = event;
420 spin_unlock_irqrestore(&dev->event_lock, flags);
421
389 update_fb(crtc, true, new_fb); 422 update_fb(crtc, true, new_fb);
390 423
391 return msm_gem_queue_inactive_work(obj, 424 return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
392 &mdp4_crtc->pageflip_work);
393} 425}
394 426
395static int mdp4_crtc_set_property(struct drm_crtc *crtc, 427static int mdp4_crtc_set_property(struct drm_crtc *crtc,
@@ -498,6 +530,8 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
498 drm_gem_object_unreference_unlocked(old_bo); 530 drm_gem_object_unreference_unlocked(old_bo);
499 } 531 }
500 532
533 request_pending(crtc, PENDING_CURSOR);
534
501 return 0; 535 return 0;
502 536
503fail: 537fail:
@@ -542,13 +576,21 @@ static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
542 struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank); 576 struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
543 struct drm_crtc *crtc = &mdp4_crtc->base; 577 struct drm_crtc *crtc = &mdp4_crtc->base;
544 struct msm_drm_private *priv = crtc->dev->dev_private; 578 struct msm_drm_private *priv = crtc->dev->dev_private;
579 unsigned pending;
545 580
546 update_cursor(crtc);
547 complete_flip(crtc, false);
548 mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank); 581 mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
549 582
550 drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq); 583 pending = atomic_xchg(&mdp4_crtc->pending, 0);
551 drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq); 584
585 if (pending & PENDING_FLIP) {
586 complete_flip(crtc, NULL);
587 drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
588 }
589
590 if (pending & PENDING_CURSOR) {
591 update_cursor(crtc);
592 drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
593 }
552} 594}
553 595
554static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus) 596static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
@@ -565,9 +607,10 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
565 return mdp4_crtc->vblank.irqmask; 607 return mdp4_crtc->vblank.irqmask;
566} 608}
567 609
568void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc) 610void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
569{ 611{
570 complete_flip(crtc, true); 612 DBG("cancel: %p", file);
613 complete_flip(crtc, file);
571} 614}
572 615
573/* set dma config, ie. the format the encoder wants. */ 616/* set dma config, ie. the format the encoder wants. */
@@ -622,6 +665,32 @@ void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
622 mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel); 665 mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
623} 666}
624 667
668static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
669 struct drm_plane *plane)
670{
671 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
672
673 BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
674
675 if (mdp4_crtc->planes[pipe_id] == plane)
676 return;
677
678 mdp4_crtc->planes[pipe_id] = plane;
679 blend_setup(crtc);
680 if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
681 crtc_flush(crtc);
682}
683
684void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
685{
686 set_attach(crtc, mdp4_plane_pipe(plane), plane);
687}
688
689void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
690{
691 set_attach(crtc, mdp4_plane_pipe(plane), NULL);
692}
693
625static const char *dma_names[] = { 694static const char *dma_names[] = {
626 "DMA_P", "DMA_S", "DMA_E", 695 "DMA_P", "DMA_S", "DMA_E",
627}; 696};
@@ -644,7 +713,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
644 crtc = &mdp4_crtc->base; 713 crtc = &mdp4_crtc->base;
645 714
646 mdp4_crtc->plane = plane; 715 mdp4_crtc->plane = plane;
647 mdp4_crtc->plane->crtc = crtc;
648 716
649 mdp4_crtc->ovlp = ovlp_id; 717 mdp4_crtc->ovlp = ovlp_id;
650 mdp4_crtc->dma = dma_id; 718 mdp4_crtc->dma = dma_id;
@@ -668,7 +736,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
668 ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64, 736 ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
669 "unref cursor", unref_cursor_worker); 737 "unref cursor", unref_cursor_worker);
670 738
671 INIT_WORK(&mdp4_crtc->pageflip_work, pageflip_worker); 739 INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
672 740
673 drm_crtc_init(dev, crtc, &mdp4_crtc_funcs); 741 drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
674 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); 742 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
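
The mdp4_crtc.c rework above replaces "do everything in the vblank handler" with an explicit request/complete protocol: callers OR a PENDING_CURSOR or PENDING_FLIP bit into an atomic word and arm the vblank interrupt; the handler exchanges the word back to zero and services only the bits actually requested, so a cursor move can no longer complete (or leak) an unrelated page flip. A standalone C11 model of that hand-off — stdatomic stands in for the kernel's atomic_t, and nothing here is driver code:

#include <stdatomic.h>
#include <stdio.h>

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2

static atomic_uint pending;

static void request_pending(unsigned bits)
{
	atomic_fetch_or(&pending, bits);
	/* real driver: mdp4_irq_register() arms the vblank interrupt here */
}

static void vblank_irq(void)
{
	/* claim all requested work in one shot; a concurrent requester
	 * re-arms the interrupt and is handled on the next vblank */
	unsigned p = atomic_exchange(&pending, 0);

	if (p & PENDING_FLIP)
		printf("complete flip\n");
	if (p & PENDING_CURSOR)
		printf("update cursor\n");
}

int main(void)
{
	request_pending(PENDING_FLIP);
	request_pending(PENDING_CURSOR);
	vblank_irq();   /* services both */
	vblank_irq();   /* nothing left to do */
	return 0;
}
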
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
index 7b645f2e837a..17330b0927b2 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_format.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
@@ -44,6 +44,22 @@ static const struct mdp4_format formats[] = {
44 FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3), 44 FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3),
45}; 45};
46 46
47uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
48 uint32_t max_formats)
49{
50 uint32_t i;
51 for (i = 0; i < ARRAY_SIZE(formats); i++) {
52 const struct mdp4_format *f = &formats[i];
53
54 if (i == max_formats)
55 break;
56
57 pixel_formats[i] = f->base.pixel_format;
58 }
59
60 return i;
61}
62
47const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format) 63const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
48{ 64{
49 int i; 65 int i;
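
The new mdp4_get_formats() fills the caller's array from the static format table and stops at max_formats; note that in this version the pipe_id argument is accepted but never consulted, so RGB and VG pipes advertise the same list. The bounded-copy idiom, as a small standalone sketch (the fourcc values are illustrative placeholders, not the driver's table):

#include <stdint.h>
#include <stdio.h>

static const uint32_t formats[] = { 0x34325241 /* AR24 */,
				    0x34325258 /* XR24 */,
				    0x36314752 /* RG16 */ };

static uint32_t get_formats(uint32_t *out, uint32_t max_formats)
{
	uint32_t i;

	for (i = 0; i < sizeof(formats) / sizeof(formats[0]); i++) {
		if (i == max_formats)
			break;
		out[i] = formats[i];
	}
	return i;	/* number of entries actually written */
}

int main(void)
{
	uint32_t buf[2];
	printf("copied %u formats\n", (unsigned)get_formats(buf, 2)); /* 2 */
	return 0;
}
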
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
index bc7fd11ad8be..8972ac35a43d 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -135,7 +135,7 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
135 unsigned i; 135 unsigned i;
136 136
137 for (i = 0; i < priv->num_crtcs; i++) 137 for (i = 0; i < priv->num_crtcs; i++)
138 mdp4_crtc_cancel_pending_flip(priv->crtcs[i]); 138 mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
139} 139}
140 140
141static void mdp4_destroy(struct msm_kms *kms) 141static void mdp4_destroy(struct msm_kms *kms)
@@ -196,6 +196,23 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
196 * for more than just RGB1->DMA_E->DTV->HDMI 196 * for more than just RGB1->DMA_E->DTV->HDMI
197 */ 197 */
198 198
199 /* construct non-private planes: */
200 plane = mdp4_plane_init(dev, VG1, false);
201 if (IS_ERR(plane)) {
202 dev_err(dev->dev, "failed to construct plane for VG1\n");
203 ret = PTR_ERR(plane);
204 goto fail;
205 }
206 priv->planes[priv->num_planes++] = plane;
207
208 plane = mdp4_plane_init(dev, VG2, false);
209 if (IS_ERR(plane)) {
210 dev_err(dev->dev, "failed to construct plane for VG2\n");
211 ret = PTR_ERR(plane);
212 goto fail;
213 }
214 priv->planes[priv->num_planes++] = plane;
215
199 /* the CRTCs get constructed with a private plane: */ 216 /* the CRTCs get constructed with a private plane: */
200 plane = mdp4_plane_init(dev, RGB1, true); 217 plane = mdp4_plane_init(dev, RGB1, true);
201 if (IS_ERR(plane)) { 218 if (IS_ERR(plane)) {
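
modeset_init() above constructs the two public VG planes with two copy-pasted blocks before the private RGB planes. If more pipes were exposed later, a table-driven loop would keep this from growing linearly; that refactor is only a suggestion, sketched standalone here with a stand-in for mdp4_plane_init():

#include <stdio.h>

enum pipe { VG1, VG2, RGB1 };

static const char *pipe_names[] = { "VG1", "VG2", "RGB1" };

/* stand-in for mdp4_plane_init(); returns NULL on failure */
static const char *plane_init(enum pipe p, int private_plane)
{
	(void)private_plane;
	return pipe_names[p];
}

int main(void)
{
	/* public (non-private) planes, constructed in one pass */
	static const enum pipe public_pipes[] = { VG1, VG2 };
	unsigned i;

	for (i = 0; i < sizeof(public_pipes) / sizeof(public_pipes[0]); i++) {
		const char *plane = plane_init(public_pipes[i], 0);
		if (!plane) {
			fprintf(stderr, "failed to construct plane\n");
			return 1;
		}
		printf("registered plane %s\n", plane);
	}
	return 0;
}
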
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
index 1e83554955f3..eb015c834087 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
@@ -75,8 +75,8 @@ struct mdp4_platform_config {
75 75
76struct mdp4_format { 76struct mdp4_format {
77 struct msm_format base; 77 struct msm_format base;
78 enum mpd4_bpc bpc_r, bpc_g, bpc_b; 78 enum mdp4_bpc bpc_r, bpc_g, bpc_b;
79 enum mpd4_bpc_alpha bpc_a; 79 enum mdp4_bpc_alpha bpc_a;
80 uint8_t unpack[4]; 80 uint8_t unpack[4];
81 bool alpha_enable, unpack_tight; 81 bool alpha_enable, unpack_tight;
82 uint8_t cpp, unpack_count; 82 uint8_t cpp, unpack_count;
@@ -93,7 +93,7 @@ static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
93 return msm_readl(mdp4_kms->mmio + reg); 93 return msm_readl(mdp4_kms->mmio + reg);
94} 94}
95 95
96static inline uint32_t pipe2flush(enum mpd4_pipe pipe) 96static inline uint32_t pipe2flush(enum mdp4_pipe pipe)
97{ 97{
98 switch (pipe) { 98 switch (pipe) {
99 case VG1: return MDP4_OVERLAY_FLUSH_VG1; 99 case VG1: return MDP4_OVERLAY_FLUSH_VG1;
@@ -133,6 +133,48 @@ static inline uint32_t dma2err(enum mdp4_dma dma)
133 } 133 }
134} 134}
135 135
136static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
137 enum mdp4_mixer_stage_id stage)
138{
139 uint32_t mixer_cfg = 0;
140
141 switch (pipe) {
142 case VG1:
143 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) |
144 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
145 break;
146 case VG2:
147 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) |
148 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
149 break;
150 case RGB1:
151 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) |
152 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
153 break;
154 case RGB2:
155 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) |
156 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
157 break;
158 case RGB3:
159 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) |
160 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
161 break;
162 case VG3:
163 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) |
164 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
165 break;
166 case VG4:
167 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) |
168 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
169 break;
170 default:
171 WARN_ON("invalid pipe");
172 break;
173 }
174
175 return mixer_cfg;
176}
177
136int mdp4_disable(struct mdp4_kms *mdp4_kms); 178int mdp4_disable(struct mdp4_kms *mdp4_kms);
137int mdp4_enable(struct mdp4_kms *mdp4_kms); 179int mdp4_enable(struct mdp4_kms *mdp4_kms);
138 180
@@ -146,6 +188,8 @@ void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
146int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); 188int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
147void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); 189void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
148 190
191uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *formats,
192 uint32_t max_formats);
149const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format); 193const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);
150 194
151void mdp4_plane_install_properties(struct drm_plane *plane, 195void mdp4_plane_install_properties(struct drm_plane *plane,
@@ -158,14 +202,16 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
158 unsigned int crtc_w, unsigned int crtc_h, 202 unsigned int crtc_w, unsigned int crtc_h,
159 uint32_t src_x, uint32_t src_y, 203 uint32_t src_x, uint32_t src_y,
160 uint32_t src_w, uint32_t src_h); 204 uint32_t src_w, uint32_t src_h);
161enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane); 205enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
162struct drm_plane *mdp4_plane_init(struct drm_device *dev, 206struct drm_plane *mdp4_plane_init(struct drm_device *dev,
163 enum mpd4_pipe pipe_id, bool private_plane); 207 enum mdp4_pipe pipe_id, bool private_plane);
164 208
165uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc); 209uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
166void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc); 210void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
167void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config); 211void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
168void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf); 212void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
213void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
214void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
169struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, 215struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
170 struct drm_plane *plane, int id, int ovlp_id, 216 struct drm_plane *plane, int id, int ovlp_id,
171 enum mdp4_dma dma_id); 217 enum mdp4_dma dma_id);
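
The new mixercfg() helper centralises the per-pipe switch that blend_setup() used to open-code: each pipe owns a small field in LAYERMIXER_IN_CFG holding its blend stage, plus a PIPEn_MIXER1 bit steering it to the second mixer. Judging by the PIPE0..PIPE6 define pairs this looks like a regular 4-bits-per-pipe layout (3 stage bits plus 1 mixer-select bit), but that layout is an assumption here — the generated macros in mdp4.xml.h remain authoritative. A compact model under that assumption:

#include <stdint.h>
#include <stdio.h>

enum pipe { VG1, VG2, RGB1, RGB2, RGB3, VG3, VG4 };

/* ASSUMPTION: pipe n occupies bits [4n+3 : 4n]: stage in the low
 * three bits, mixer-1 select in the top bit of the nibble. */
static uint32_t mixercfg(int mixer, enum pipe pipe, uint32_t stage)
{
	uint32_t cfg = (stage & 0x7) << (4 * pipe);

	if (mixer == 1)
		cfg |= 1u << (4 * pipe + 3);
	return cfg;
}

int main(void)
{
	/* VG2 at stage 2 on mixer 1 -> 0x000000a0 under this layout */
	printf("%08x\n", (unsigned)mixercfg(1, VG2, 2));
	return 0;
}
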
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
index 3468229d58b3..0f0af243f6fc 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
@@ -22,7 +22,7 @@ struct mdp4_plane {
22 struct drm_plane base; 22 struct drm_plane base;
23 const char *name; 23 const char *name;
24 24
25 enum mpd4_pipe pipe; 25 enum mdp4_pipe pipe;
26 26
27 uint32_t nformats; 27 uint32_t nformats;
28 uint32_t formats[32]; 28 uint32_t formats[32];
@@ -61,7 +61,9 @@ static int mdp4_plane_update(struct drm_plane *plane,
61static int mdp4_plane_disable(struct drm_plane *plane) 61static int mdp4_plane_disable(struct drm_plane *plane)
62{ 62{
63 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 63 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
64 DBG("%s: TODO", mdp4_plane->name); // XXX 64 DBG("%s: disable", mdp4_plane->name);
65 if (plane->crtc)
66 mdp4_crtc_detach(plane->crtc, plane);
65 return 0; 67 return 0;
66} 68}
67 69
@@ -101,7 +103,7 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
101{ 103{
102 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 104 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
103 struct mdp4_kms *mdp4_kms = get_kms(plane); 105 struct mdp4_kms *mdp4_kms = get_kms(plane);
104 enum mpd4_pipe pipe = mdp4_plane->pipe; 106 enum mdp4_pipe pipe = mdp4_plane->pipe;
105 uint32_t iova; 107 uint32_t iova;
106 108
107 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe), 109 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
@@ -129,7 +131,7 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
129{ 131{
130 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 132 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
131 struct mdp4_kms *mdp4_kms = get_kms(plane); 133 struct mdp4_kms *mdp4_kms = get_kms(plane);
132 enum mpd4_pipe pipe = mdp4_plane->pipe; 134 enum mdp4_pipe pipe = mdp4_plane->pipe;
133 const struct mdp4_format *format; 135 const struct mdp4_format *format;
134 uint32_t op_mode = 0; 136 uint32_t op_mode = 0;
135 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; 137 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
@@ -141,6 +143,10 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
141 src_w = src_w >> 16; 143 src_w = src_w >> 16;
142 src_h = src_h >> 16; 144 src_h = src_h >> 16;
143 145
146 DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp4_plane->name,
147 fb->base.id, src_x, src_y, src_w, src_h,
148 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
149
144 if (src_w != crtc_w) { 150 if (src_w != crtc_w) {
145 op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN; 151 op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
146 /* TODO calc phasex_step */ 152 /* TODO calc phasex_step */
@@ -191,7 +197,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
191 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step); 197 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
192 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step); 198 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
193 199
194 plane->crtc = crtc; 200 /* TODO detach from old crtc (if we had more than one) */
201 mdp4_crtc_attach(crtc, plane);
195 202
196 return 0; 203 return 0;
197} 204}
@@ -202,7 +209,7 @@ static const char *pipe_names[] = {
202 "VG3", "VG4", 209 "VG3", "VG4",
203}; 210};
204 211
205enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane) 212enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
206{ 213{
207 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 214 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
208 return mdp4_plane->pipe; 215 return mdp4_plane->pipe;
@@ -210,9 +217,8 @@ enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane)
210 217
211/* initialize plane */ 218/* initialize plane */
212struct drm_plane *mdp4_plane_init(struct drm_device *dev, 219struct drm_plane *mdp4_plane_init(struct drm_device *dev,
213 enum mpd4_pipe pipe_id, bool private_plane) 220 enum mdp4_pipe pipe_id, bool private_plane)
214{ 221{
215 struct msm_drm_private *priv = dev->dev_private;
216 struct drm_plane *plane = NULL; 222 struct drm_plane *plane = NULL;
217 struct mdp4_plane *mdp4_plane; 223 struct mdp4_plane *mdp4_plane;
218 int ret; 224 int ret;
@@ -228,8 +234,12 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
228 mdp4_plane->pipe = pipe_id; 234 mdp4_plane->pipe = pipe_id;
229 mdp4_plane->name = pipe_names[pipe_id]; 235 mdp4_plane->name = pipe_names[pipe_id];
230 236
231 drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &mdp4_plane_funcs, 237 mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
232 mdp4_plane->formats, mdp4_plane->nformats, private_plane); 238 ARRAY_SIZE(mdp4_plane->formats));
239
240 drm_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
241 mdp4_plane->formats, mdp4_plane->nformats,
242 private_plane);
233 243
234 mdp4_plane_install_properties(plane, &plane->base); 244 mdp4_plane_install_properties(plane, &plane->base);
235 245
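
mdp4_plane_mode_set() above shifts src_w and src_h right by 16 because the DRM plane API passes source coordinates in 16.16 fixed point (sub-pixel positions for scaling hardware), while the destination CRTC coordinates are plain integers. A one-screen refresher on the convention:

#include <stdint.h>
#include <stdio.h>

/* 16.16 fixed point: high 16 bits integer, low 16 bits fraction */
static uint32_t to_fixed(uint32_t ipart) { return ipart << 16; }
static uint32_t fixed_int(uint32_t fx)   { return fx >> 16; }

int main(void)
{
	uint32_t src_w = to_fixed(1920);	/* 1920.0 in 16.16 */
	uint32_t half  = src_w + 0x8000;	/* 1920.5: half-pixel extra */

	printf("int part of 1920.0 = %u\n", (unsigned)fixed_int(src_w));
	printf("int part of 1920.5 = %u\n", (unsigned)fixed_int(half));
	return 0;
}
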
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b3a2f1629041..86537692e45c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -187,6 +187,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
187 init_waitqueue_head(&priv->fence_event); 187 init_waitqueue_head(&priv->fence_event);
188 188
189 INIT_LIST_HEAD(&priv->inactive_list); 189 INIT_LIST_HEAD(&priv->inactive_list);
190 INIT_LIST_HEAD(&priv->fence_cbs);
190 191
191 drm_mode_config_init(dev); 192 drm_mode_config_init(dev);
192 193
@@ -539,15 +540,36 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
539 return ret; 540 return ret;
540} 541}
541 542
542/* call under struct_mutex */ 543/* called from workqueue */
543void msm_update_fence(struct drm_device *dev, uint32_t fence) 544void msm_update_fence(struct drm_device *dev, uint32_t fence)
544{ 545{
545 struct msm_drm_private *priv = dev->dev_private; 546 struct msm_drm_private *priv = dev->dev_private;
546 547
547 if (fence > priv->completed_fence) { 548 mutex_lock(&dev->struct_mutex);
548 priv->completed_fence = fence; 549 priv->completed_fence = max(fence, priv->completed_fence);
549 wake_up_all(&priv->fence_event); 550
551 while (!list_empty(&priv->fence_cbs)) {
552 struct msm_fence_cb *cb;
553
554 cb = list_first_entry(&priv->fence_cbs,
555 struct msm_fence_cb, work.entry);
556
557 if (cb->fence > priv->completed_fence)
558 break;
559
560 list_del_init(&cb->work.entry);
561 queue_work(priv->wq, &cb->work);
550 } 562 }
563
564 mutex_unlock(&dev->struct_mutex);
565
566 wake_up_all(&priv->fence_event);
567}
568
569void __msm_fence_worker(struct work_struct *work)
570{
571 struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
572 cb->func(cb);
551} 573}
552 574
553/* 575/*
@@ -650,13 +672,13 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
650} 672}
651 673
652static const struct drm_ioctl_desc msm_ioctls[] = { 674static const struct drm_ioctl_desc msm_ioctls[] = {
653 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH), 675 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
654 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH), 676 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
655 DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH), 677 DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
656 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH), 678 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
657 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH), 679 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
658 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH), 680 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
659 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH), 681 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
660}; 682};
661 683
662static const struct vm_operations_struct vm_ops = { 684static const struct vm_operations_struct vm_ops = {
@@ -680,7 +702,11 @@ static const struct file_operations fops = {
680}; 702};
681 703
682static struct drm_driver msm_driver = { 704static struct drm_driver msm_driver = {
683 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET, 705 .driver_features = DRIVER_HAVE_IRQ |
706 DRIVER_GEM |
707 DRIVER_PRIME |
708 DRIVER_RENDER |
709 DRIVER_MODESET,
684 .load = msm_load, 710 .load = msm_load,
685 .unload = msm_unload, 711 .unload = msm_unload,
686 .open = msm_open, 712 .open = msm_open,
@@ -698,6 +724,16 @@ static struct drm_driver msm_driver = {
698 .dumb_create = msm_gem_dumb_create, 724 .dumb_create = msm_gem_dumb_create,
699 .dumb_map_offset = msm_gem_dumb_map_offset, 725 .dumb_map_offset = msm_gem_dumb_map_offset,
700 .dumb_destroy = drm_gem_dumb_destroy, 726 .dumb_destroy = drm_gem_dumb_destroy,
727 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
728 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
729 .gem_prime_export = drm_gem_prime_export,
730 .gem_prime_import = drm_gem_prime_import,
731 .gem_prime_pin = msm_gem_prime_pin,
732 .gem_prime_unpin = msm_gem_prime_unpin,
733 .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
734 .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
735 .gem_prime_vmap = msm_gem_prime_vmap,
736 .gem_prime_vunmap = msm_gem_prime_vunmap,
701#ifdef CONFIG_DEBUG_FS 737#ifdef CONFIG_DEBUG_FS
702 .debugfs_init = msm_debugfs_init, 738 .debugfs_init = msm_debugfs_init,
703 .debugfs_cleanup = msm_debugfs_cleanup, 739 .debugfs_cleanup = msm_debugfs_cleanup,
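
The fence-callback list in msm_drv.c replaces the old per-bo inactive_work list: each callback is appended with the fence number it waits for, and msm_update_fence() dispatches from the head while head->fence <= completed_fence, deferring the actual call through queue_work(). The early break keeps each update O(ready entries), and it assumes callbacks arrive in roughly ascending fence order; an out-of-order entry is not lost, merely dispatched once the larger fence ahead of it completes. A self-contained model of that ordered-dispatch queue:

#include <stdint.h>
#include <stdio.h>

struct fence_cb {
	uint32_t fence;
	void (*func)(struct fence_cb *cb);
	struct fence_cb *next;
};

static struct fence_cb *head, **tail = &head;
static uint32_t completed_fence;

static void queue_cb(struct fence_cb *cb)	/* append: ascending order */
{
	cb->next = NULL;
	*tail = cb;
	tail = &cb->next;
}

static void update_fence(uint32_t fence)
{
	if (fence > completed_fence)
		completed_fence = fence;

	while (head && head->fence <= completed_fence) {
		struct fence_cb *cb = head;
		head = cb->next;
		if (!head)
			tail = &head;
		cb->func(cb);	/* real driver: queue_work() instead */
	}
}

static void done(struct fence_cb *cb)
{
	printf("fence %u retired\n", (unsigned)cb->fence);
}

int main(void)
{
	struct fence_cb a = { .fence = 1, .func = done };
	struct fence_cb b = { .fence = 3, .func = done };

	queue_cb(&a);
	queue_cb(&b);
	update_fence(2);	/* fires a only */
	update_fence(3);	/* fires b */
	return 0;
}
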
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index df8f1d084bc1..d39f0862b19e 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -73,10 +73,16 @@ struct msm_drm_private {
73 73
74 struct workqueue_struct *wq; 74 struct workqueue_struct *wq;
75 75
76 /* callbacks deferred until bo is inactive: */
77 struct list_head fence_cbs;
78
76 /* registered IOMMU domains: */ 79 /* registered IOMMU domains: */
77 unsigned int num_iommus; 80 unsigned int num_iommus;
78 struct iommu_domain *iommus[NUM_DOMAINS]; 81 struct iommu_domain *iommus[NUM_DOMAINS];
79 82
83 unsigned int num_planes;
84 struct drm_plane *planes[8];
85
80 unsigned int num_crtcs; 86 unsigned int num_crtcs;
81 struct drm_crtc *crtcs[8]; 87 struct drm_crtc *crtcs[8];
82 88
@@ -94,6 +100,20 @@ struct msm_format {
94 uint32_t pixel_format; 100 uint32_t pixel_format;
95}; 101};
96 102
103/* callback from wq once fence has passed: */
104struct msm_fence_cb {
105 struct work_struct work;
106 uint32_t fence;
107 void (*func)(struct msm_fence_cb *cb);
108};
109
110void __msm_fence_worker(struct work_struct *work);
111
112#define INIT_FENCE_CB(_cb, _func) do { \
113 INIT_WORK(&(_cb)->work, __msm_fence_worker); \
114 (_cb)->func = _func; \
115 } while (0)
116
97/* As there are different display controller blocks depending on the 117/* As there are different display controller blocks depending on the
98 * snapdragon version, the kms support is split out and the appropriate 118 * snapdragon version, the kms support is split out and the appropriate
99 * implementation is loaded at runtime. The kms module is responsible 119 * implementation is loaded at runtime. The kms module is responsible
@@ -141,17 +161,24 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
141int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, 161int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
142 uint32_t *iova); 162 uint32_t *iova);
143int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova); 163int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
164struct page **msm_gem_get_pages(struct drm_gem_object *obj);
165void msm_gem_put_pages(struct drm_gem_object *obj);
144void msm_gem_put_iova(struct drm_gem_object *obj, int id); 166void msm_gem_put_iova(struct drm_gem_object *obj, int id);
145int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 167int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
146 struct drm_mode_create_dumb *args); 168 struct drm_mode_create_dumb *args);
147int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
148 uint32_t handle);
149int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 169int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
150 uint32_t handle, uint64_t *offset); 170 uint32_t handle, uint64_t *offset);
171struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
172void *msm_gem_prime_vmap(struct drm_gem_object *obj);
173void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
174struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
175 size_t size, struct sg_table *sg);
176int msm_gem_prime_pin(struct drm_gem_object *obj);
177void msm_gem_prime_unpin(struct drm_gem_object *obj);
151void *msm_gem_vaddr_locked(struct drm_gem_object *obj); 178void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
152void *msm_gem_vaddr(struct drm_gem_object *obj); 179void *msm_gem_vaddr(struct drm_gem_object *obj);
153int msm_gem_queue_inactive_work(struct drm_gem_object *obj, 180int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
154 struct work_struct *work); 181 struct msm_fence_cb *cb);
155void msm_gem_move_to_active(struct drm_gem_object *obj, 182void msm_gem_move_to_active(struct drm_gem_object *obj,
156 struct msm_gpu *gpu, bool write, uint32_t fence); 183 struct msm_gpu *gpu, bool write, uint32_t fence);
157void msm_gem_move_to_inactive(struct drm_gem_object *obj); 184void msm_gem_move_to_inactive(struct drm_gem_object *obj);
@@ -163,6 +190,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
163 uint32_t size, uint32_t flags, uint32_t *handle); 190 uint32_t size, uint32_t flags, uint32_t *handle);
164struct drm_gem_object *msm_gem_new(struct drm_device *dev, 191struct drm_gem_object *msm_gem_new(struct drm_device *dev,
165 uint32_t size, uint32_t flags); 192 uint32_t size, uint32_t flags);
193struct drm_gem_object *msm_gem_import(struct drm_device *dev,
194 uint32_t size, struct sg_table *sgt);
166 195
167struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); 196struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
168const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); 197const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
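
INIT_FENCE_CB in msm_drv.h wires every msm_fence_cb's work item to the common __msm_fence_worker trampoline, which simply invokes cb->func; consumers embed the struct and recover their enclosing object with container_of, exactly as pageflip_cb does in mdp4_crtc.c. The embed-and-recover idiom in standalone C — the offsetof-based container_of below is the usual userspace spelling, not the kernel macro:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fence_cb {
	void (*func)(struct fence_cb *cb);
};

struct crtc {
	const char *name;
	struct fence_cb pageflip_cb;	/* embedded callback */
};

static void pageflip_done(struct fence_cb *cb)
{
	struct crtc *c = container_of(cb, struct crtc, pageflip_cb);
	printf("%s: flip complete\n", c->name);
}

int main(void)
{
	struct crtc c = { .name = "crtc0" };

	c.pageflip_cb.func = pageflip_done;	/* ~ INIT_FENCE_CB */
	c.pageflip_cb.func(&c.pageflip_cb);	/* ~ __msm_fence_worker */
	return 0;
}
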
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 2bae46c66a30..e587d251c590 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -17,6 +17,7 @@
17 17
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/shmem_fs.h> 19#include <linux/shmem_fs.h>
20#include <linux/dma-buf.h>
20 21
21#include "msm_drv.h" 22#include "msm_drv.h"
22#include "msm_gem.h" 23#include "msm_gem.h"
@@ -77,6 +78,21 @@ static void put_pages(struct drm_gem_object *obj)
77 } 78 }
78} 79}
79 80
81struct page **msm_gem_get_pages(struct drm_gem_object *obj)
82{
83 struct drm_device *dev = obj->dev;
84 struct page **p;
85 mutex_lock(&dev->struct_mutex);
86 p = get_pages(obj);
87 mutex_unlock(&dev->struct_mutex);
88 return p;
89}
90
91void msm_gem_put_pages(struct drm_gem_object *obj)
92{
93 /* when we start tracking the pin count, then do something here */
94}
95
80int msm_gem_mmap_obj(struct drm_gem_object *obj, 96int msm_gem_mmap_obj(struct drm_gem_object *obj,
81 struct vm_area_struct *vma) 97 struct vm_area_struct *vma)
82{ 98{
@@ -162,6 +178,11 @@ out:
162 case 0: 178 case 0:
163 case -ERESTARTSYS: 179 case -ERESTARTSYS:
164 case -EINTR: 180 case -EINTR:
181 case -EBUSY:
182 /*
183 * EBUSY is ok: this just means that another thread
184 * already did the job.
185 */
165 return VM_FAULT_NOPAGE; 186 return VM_FAULT_NOPAGE;
166 case -ENOMEM: 187 case -ENOMEM:
167 return VM_FAULT_OOM; 188 return VM_FAULT_OOM;
@@ -293,7 +314,17 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
293 314
294int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova) 315int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
295{ 316{
317 struct msm_gem_object *msm_obj = to_msm_bo(obj);
296 int ret; 318 int ret;
319
320 /* this is safe right now because we don't unmap until the
321 * bo is deleted:
322 */
323 if (msm_obj->domain[id].iova) {
324 *iova = msm_obj->domain[id].iova;
325 return 0;
326 }
327
297 mutex_lock(&obj->dev->struct_mutex); 328 mutex_lock(&obj->dev->struct_mutex);
298 ret = msm_gem_get_iova_locked(obj, id, iova); 329 ret = msm_gem_get_iova_locked(obj, id, iova);
299 mutex_unlock(&obj->dev->struct_mutex); 330 mutex_unlock(&obj->dev->struct_mutex);
@@ -363,8 +394,11 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
363 return ret; 394 return ret;
364} 395}
365 396
366int msm_gem_queue_inactive_work(struct drm_gem_object *obj, 397/* setup callback for when bo is no longer busy..
367 struct work_struct *work) 398 * TODO probably want to differentiate read vs write..
399 */
400int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
401 struct msm_fence_cb *cb)
368{ 402{
369 struct drm_device *dev = obj->dev; 403 struct drm_device *dev = obj->dev;
370 struct msm_drm_private *priv = dev->dev_private; 404 struct msm_drm_private *priv = dev->dev_private;
@@ -372,12 +406,13 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
372 int ret = 0; 406 int ret = 0;
373 407
374 mutex_lock(&dev->struct_mutex); 408 mutex_lock(&dev->struct_mutex);
375 if (!list_empty(&work->entry)) { 409 if (!list_empty(&cb->work.entry)) {
376 ret = -EINVAL; 410 ret = -EINVAL;
377 } else if (is_active(msm_obj)) { 411 } else if (is_active(msm_obj)) {
378 list_add_tail(&work->entry, &msm_obj->inactive_work); 412 cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
413 list_add_tail(&cb->work.entry, &priv->fence_cbs);
379 } else { 414 } else {
380 queue_work(priv->wq, work); 415 queue_work(priv->wq, &cb->work);
381 } 416 }
382 mutex_unlock(&dev->struct_mutex); 417 mutex_unlock(&dev->struct_mutex);
383 418
@@ -410,16 +445,6 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
410 msm_obj->write_fence = 0; 445 msm_obj->write_fence = 0;
411 list_del_init(&msm_obj->mm_list); 446 list_del_init(&msm_obj->mm_list);
412 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); 447 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
413
414 while (!list_empty(&msm_obj->inactive_work)) {
415 struct work_struct *work;
416
417 work = list_first_entry(&msm_obj->inactive_work,
418 struct work_struct, entry);
419
420 list_del_init(&work->entry);
421 queue_work(priv->wq, work);
422 }
423} 448}
424 449
425int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, 450int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
@@ -510,10 +535,21 @@ void msm_gem_free_object(struct drm_gem_object *obj)
510 535
511 drm_gem_free_mmap_offset(obj); 536 drm_gem_free_mmap_offset(obj);
512 537
513 if (msm_obj->vaddr) 538 if (obj->import_attach) {
514 vunmap(msm_obj->vaddr); 539 if (msm_obj->vaddr)
540 dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
515 541
516 put_pages(obj); 542 /* Don't drop the pages for imported dmabuf, as they are not
543 * ours, just free the array we allocated:
544 */
545 if (msm_obj->pages)
546 drm_free_large(msm_obj->pages);
547
548 } else {
549 if (msm_obj->vaddr)
550 vunmap(msm_obj->vaddr);
551 put_pages(obj);
552 }
517 553
518 if (msm_obj->resv == &msm_obj->_resv) 554 if (msm_obj->resv == &msm_obj->_resv)
519 reservation_object_fini(msm_obj->resv); 555 reservation_object_fini(msm_obj->resv);
@@ -549,17 +585,12 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
549 return ret; 585 return ret;
550} 586}
551 587
552struct drm_gem_object *msm_gem_new(struct drm_device *dev, 588static int msm_gem_new_impl(struct drm_device *dev,
553 uint32_t size, uint32_t flags) 589 uint32_t size, uint32_t flags,
590 struct drm_gem_object **obj)
554{ 591{
555 struct msm_drm_private *priv = dev->dev_private; 592 struct msm_drm_private *priv = dev->dev_private;
556 struct msm_gem_object *msm_obj; 593 struct msm_gem_object *msm_obj;
557 struct drm_gem_object *obj = NULL;
558 int ret;
559
560 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
561
562 size = PAGE_ALIGN(size);
563 594
564 switch (flags & MSM_BO_CACHE_MASK) { 595 switch (flags & MSM_BO_CACHE_MASK) {
565 case MSM_BO_UNCACHED: 596 case MSM_BO_UNCACHED:
@@ -569,21 +600,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
569 default: 600 default:
570 dev_err(dev->dev, "invalid cache flag: %x\n", 601 dev_err(dev->dev, "invalid cache flag: %x\n",
571 (flags & MSM_BO_CACHE_MASK)); 602 (flags & MSM_BO_CACHE_MASK));
572 ret = -EINVAL; 603 return -EINVAL;
573 goto fail;
574 } 604 }
575 605
576 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); 606 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
577 if (!msm_obj) { 607 if (!msm_obj)
578 ret = -ENOMEM; 608 return -ENOMEM;
579 goto fail;
580 }
581
582 obj = &msm_obj->base;
583
584 ret = drm_gem_object_init(dev, obj, size);
585 if (ret)
586 goto fail;
587 609
588 msm_obj->flags = flags; 610 msm_obj->flags = flags;
589 611
@@ -591,9 +613,69 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
591 reservation_object_init(msm_obj->resv); 613 reservation_object_init(msm_obj->resv);
592 614
593 INIT_LIST_HEAD(&msm_obj->submit_entry); 615 INIT_LIST_HEAD(&msm_obj->submit_entry);
594 INIT_LIST_HEAD(&msm_obj->inactive_work);
595 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); 616 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
596 617
618 *obj = &msm_obj->base;
619
620 return 0;
621}
622
623struct drm_gem_object *msm_gem_new(struct drm_device *dev,
624 uint32_t size, uint32_t flags)
625{
626 struct drm_gem_object *obj;
627 int ret;
628
629 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
630
631 size = PAGE_ALIGN(size);
632
633 ret = msm_gem_new_impl(dev, size, flags, &obj);
634 if (ret)
635 goto fail;
636
637 ret = drm_gem_object_init(dev, obj, size);
638 if (ret)
639 goto fail;
640
641 return obj;
642
643fail:
644 if (obj)
645 drm_gem_object_unreference_unlocked(obj);
646
647 return ERR_PTR(ret);
648}
649
650struct drm_gem_object *msm_gem_import(struct drm_device *dev,
651 uint32_t size, struct sg_table *sgt)
652{
653 struct msm_gem_object *msm_obj;
654 struct drm_gem_object *obj;
655 int ret, npages;
656
657 size = PAGE_ALIGN(size);
658
659 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
660 if (ret)
661 goto fail;
662
663 drm_gem_private_object_init(dev, obj, size);
664
665 npages = size / PAGE_SIZE;
666
667 msm_obj = to_msm_bo(obj);
668 msm_obj->sgt = sgt;
669 msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
670 if (!msm_obj->pages) {
671 ret = -ENOMEM;
672 goto fail;
673 }
674
675 ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
676 if (ret)
677 goto fail;
678
597 return obj; 679 return obj;
598 680
599fail: 681fail:
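
msm_gem_new() and the new msm_gem_import() now share msm_gem_new_impl() for the backing-agnostic setup (cache-flag validation, reservation object, list membership) and differ only in how pages arrive: drm_gem_object_init() attaches shmem backing, while drm_gem_private_object_init() leaves the object pageless so the imported sg_table can supply them. The same split shows in msm_gem_free_object(), which must not release pages it does not own. The split-constructor shape, sketched standalone:

#include <stdio.h>
#include <stdlib.h>

struct bo { size_t size; int imported; };

/* common, backing-agnostic setup (~ msm_gem_new_impl) */
static struct bo *bo_new_impl(size_t size)
{
	struct bo *bo = calloc(1, sizeof(*bo));
	if (bo)
		bo->size = size;
	return bo;
}

static struct bo *bo_new(size_t size)		/* shmem-backed path */
{
	return bo_new_impl(size);
}

static struct bo *bo_import(size_t size)	/* dma-buf path */
{
	struct bo *bo = bo_new_impl(size);
	if (bo)
		bo->imported = 1;	/* pages come from the exporter */
	return bo;
}

int main(void)
{
	struct bo *a = bo_new(4096), *b = bo_import(8192);

	if (a && b)
		printf("a: %zu imported=%d, b: %zu imported=%d\n",
		       a->size, a->imported, b->size, b->imported);
	free(a);
	free(b);
	return 0;
}
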
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 0676f32e2c6a..f4f23a578d9d 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -45,9 +45,6 @@ struct msm_gem_object {
45 */ 45 */
46 struct list_head submit_entry; 46 struct list_head submit_entry;
47 47
48 /* work defered until bo is inactive: */
49 struct list_head inactive_work;
50
51 struct page **pages; 48 struct page **pages;
52 struct sg_table *sgt; 49 struct sg_table *sgt;
53 void *vaddr; 50 void *vaddr;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
new file mode 100644
index 000000000000..d48f9fc5129b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+
+
+struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	BUG_ON(!msm_obj->sgt);  /* should have already pinned! */
+	return msm_obj->sgt;
+}
+
+void *msm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	return msm_gem_vaddr(obj);
+}
+
+void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	/* TODO msm_gem_vunmap() */
+}
+
+struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
+		size_t size, struct sg_table *sg)
+{
+	return msm_gem_import(dev, size, sg);
+}
+
+int msm_gem_prime_pin(struct drm_gem_object *obj)
+{
+	if (!obj->import_attach)
+		msm_gem_get_pages(obj);
+	return 0;
+}
+
+void msm_gem_prime_unpin(struct drm_gem_object *obj)
+{
+	if (!obj->import_attach)
+		msm_gem_put_pages(obj);
+}
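
msm_gem_prime.c above supplies only the GEM-object half of PRIME; the callbacks take effect once they are wired into the driver's struct drm_driver together with the generic helpers from drm_prime.c. A sketch of the expected hookup (field names per the struct drm_driver of this kernel generation; the actual wiring lives in msm_drv.c and is not shown in this diff):

	static struct drm_driver msm_driver = {
		.driver_features           = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_PRIME,
		/* ... */
		.prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
		.gem_prime_export          = drm_gem_prime_export,
		.gem_prime_import          = drm_gem_prime_import,
		.gem_prime_pin             = msm_gem_prime_pin,
		.gem_prime_unpin           = msm_gem_prime_unpin,
		.gem_prime_get_sg_table    = msm_gem_prime_get_sg_table,
		.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
		.gem_prime_vmap            = msm_gem_prime_vmap,
		.gem_prime_vunmap          = msm_gem_prime_vunmap,
		/* ... */
	};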
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 3bab937965d1..4583d61556f5 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -268,6 +268,8 @@ static void retire_worker(struct work_struct *work)
 	struct drm_device *dev = gpu->dev;
 	uint32_t fence = gpu->funcs->last_fence(gpu);
 
+	msm_update_fence(gpu->dev, fence);
+
 	mutex_lock(&dev->struct_mutex);
 
 	while (!list_empty(&gpu->active_list)) {
@@ -287,8 +289,6 @@ static void retire_worker(struct work_struct *work)
 		}
 	}
 
-	msm_update_fence(gpu->dev, fence);
-
 	mutex_unlock(&dev->struct_mutex);
 }
 
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ff80f12480ea..7cf787d697b1 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -3,6 +3,7 @@ config DRM_NOUVEAU
 	depends on DRM && PCI
 	select FW_LOADER
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index d939a1da3203..edcf801613e6 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -28,7 +28,9 @@ nouveau-y += core/subdev/bar/nv50.o
 nouveau-y += core/subdev/bar/nvc0.o
 nouveau-y += core/subdev/bios/base.o
 nouveau-y += core/subdev/bios/bit.o
+nouveau-y += core/subdev/bios/boost.o
 nouveau-y += core/subdev/bios/conn.o
+nouveau-y += core/subdev/bios/cstep.o
 nouveau-y += core/subdev/bios/dcb.o
 nouveau-y += core/subdev/bios/disp.o
 nouveau-y += core/subdev/bios/dp.o
@@ -39,17 +41,26 @@ nouveau-y += core/subdev/bios/init.o
 nouveau-y += core/subdev/bios/mxm.o
 nouveau-y += core/subdev/bios/perf.o
 nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/rammap.o
+nouveau-y += core/subdev/bios/timing.o
 nouveau-y += core/subdev/bios/therm.o
+nouveau-y += core/subdev/bios/vmap.o
+nouveau-y += core/subdev/bios/volt.o
 nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bus/hwsq.o
 nouveau-y += core/subdev/bus/nv04.o
 nouveau-y += core/subdev/bus/nv31.o
 nouveau-y += core/subdev/bus/nv50.o
+nouveau-y += core/subdev/bus/nv94.o
 nouveau-y += core/subdev/bus/nvc0.o
+nouveau-y += core/subdev/clock/base.o
 nouveau-y += core/subdev/clock/nv04.o
 nouveau-y += core/subdev/clock/nv40.o
 nouveau-y += core/subdev/clock/nv50.o
+nouveau-y += core/subdev/clock/nv84.o
 nouveau-y += core/subdev/clock/nva3.o
 nouveau-y += core/subdev/clock/nvc0.o
+nouveau-y += core/subdev/clock/nve0.o
 nouveau-y += core/subdev/clock/pllnv04.o
 nouveau-y += core/subdev/clock/pllnva3.o
 nouveau-y += core/subdev/devinit/base.o
@@ -78,7 +89,12 @@ nouveau-y += core/subdev/fb/nv47.o
 nouveau-y += core/subdev/fb/nv49.o
 nouveau-y += core/subdev/fb/nv4e.o
 nouveau-y += core/subdev/fb/nv50.o
+nouveau-y += core/subdev/fb/nv84.o
+nouveau-y += core/subdev/fb/nva3.o
+nouveau-y += core/subdev/fb/nvaa.o
+nouveau-y += core/subdev/fb/nvaf.o
 nouveau-y += core/subdev/fb/nvc0.o
+nouveau-y += core/subdev/fb/nve0.o
 nouveau-y += core/subdev/fb/ramnv04.o
 nouveau-y += core/subdev/fb/ramnv10.o
 nouveau-y += core/subdev/fb/ramnv1a.o
@@ -89,7 +105,12 @@ nouveau-y += core/subdev/fb/ramnv44.o
 nouveau-y += core/subdev/fb/ramnv49.o
 nouveau-y += core/subdev/fb/ramnv4e.o
 nouveau-y += core/subdev/fb/ramnv50.o
+nouveau-y += core/subdev/fb/ramnva3.o
+nouveau-y += core/subdev/fb/ramnvaa.o
 nouveau-y += core/subdev/fb/ramnvc0.o
+nouveau-y += core/subdev/fb/ramnve0.o
+nouveau-y += core/subdev/fb/sddr3.o
+nouveau-y += core/subdev/fb/gddr5.o
 nouveau-y += core/subdev/gpio/base.o
 nouveau-y += core/subdev/gpio/nv10.o
 nouveau-y += core/subdev/gpio/nv50.o
@@ -113,13 +134,22 @@ nouveau-y += core/subdev/instmem/nv50.o
 nouveau-y += core/subdev/ltcg/nvc0.o
 nouveau-y += core/subdev/mc/base.o
 nouveau-y += core/subdev/mc/nv04.o
+nouveau-y += core/subdev/mc/nv40.o
 nouveau-y += core/subdev/mc/nv44.o
 nouveau-y += core/subdev/mc/nv50.o
+nouveau-y += core/subdev/mc/nv94.o
 nouveau-y += core/subdev/mc/nv98.o
 nouveau-y += core/subdev/mc/nvc0.o
+nouveau-y += core/subdev/mc/nvc3.o
 nouveau-y += core/subdev/mxm/base.o
 nouveau-y += core/subdev/mxm/mxms.o
 nouveau-y += core/subdev/mxm/nv50.o
+nouveau-y += core/subdev/pwr/base.o
+nouveau-y += core/subdev/pwr/memx.o
+nouveau-y += core/subdev/pwr/nva3.o
+nouveau-y += core/subdev/pwr/nvc0.o
+nouveau-y += core/subdev/pwr/nvd0.o
+nouveau-y += core/subdev/pwr/nv108.o
 nouveau-y += core/subdev/therm/base.o
 nouveau-y += core/subdev/therm/fan.o
 nouveau-y += core/subdev/therm/fannil.o
@@ -140,6 +170,9 @@ nouveau-y += core/subdev/vm/nv41.o
 nouveau-y += core/subdev/vm/nv44.o
 nouveau-y += core/subdev/vm/nv50.o
 nouveau-y += core/subdev/vm/nvc0.o
+nouveau-y += core/subdev/volt/base.o
+nouveau-y += core/subdev/volt/gpio.o
+nouveau-y += core/subdev/volt/nv40.o
 
 nouveau-y += core/engine/falcon.o
 nouveau-y += core/engine/xtensa.o
@@ -158,6 +191,7 @@ nouveau-y += core/engine/copy/nve0.o
 nouveau-y += core/engine/crypt/nv84.o
 nouveau-y += core/engine/crypt/nv98.o
 nouveau-y += core/engine/device/base.o
+nouveau-y += core/engine/device/ctrl.o
 nouveau-y += core/engine/device/nv04.o
 nouveau-y += core/engine/device/nv10.o
 nouveau-y += core/engine/device/nv20.o
@@ -227,8 +261,18 @@ nouveau-y += core/engine/graph/nve4.o
 nouveau-y += core/engine/graph/nvf0.o
 nouveau-y += core/engine/mpeg/nv31.o
 nouveau-y += core/engine/mpeg/nv40.o
+nouveau-y += core/engine/mpeg/nv44.o
 nouveau-y += core/engine/mpeg/nv50.o
 nouveau-y += core/engine/mpeg/nv84.o
+nouveau-y += core/engine/perfmon/base.o
+nouveau-y += core/engine/perfmon/daemon.o
+nouveau-y += core/engine/perfmon/nv40.o
+nouveau-y += core/engine/perfmon/nv50.o
+nouveau-y += core/engine/perfmon/nv84.o
+nouveau-y += core/engine/perfmon/nva3.o
+nouveau-y += core/engine/perfmon/nvc0.o
+nouveau-y += core/engine/perfmon/nve0.o
+nouveau-y += core/engine/perfmon/nvf0.o
 nouveau-y += core/engine/ppp/nv98.o
 nouveau-y += core/engine/ppp/nvc0.o
 nouveau-y += core/engine/software/nv04.o
@@ -260,9 +304,7 @@ include $(src)/dispnv04/Makefile
 nouveau-y += nv50_display.o
 
 # drm/pm
-nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
-nouveau-y += nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o
-nouveau-y += nouveau_mem.o
+nouveau-y += nouveau_hwmon.o nouveau_sysfs.o
 
 # other random bits
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index 7eb81c1b6fab..3f3c76581a9e 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -23,62 +23,114 @@
 #include <core/os.h>
 #include <core/event.h>
 
-static void
-nouveau_event_put_locked(struct nouveau_event *event, int index,
-			 struct nouveau_eventh *handler)
-{
-	if (!--event->index[index].refs) {
-		if (event->disable)
-			event->disable(event, index);
-	}
-	list_del(&handler->head);
-}
-
-void
-nouveau_event_put(struct nouveau_event *event, int index,
-		  struct nouveau_eventh *handler)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&event->lock, flags);
-	if (index < event->index_nr)
-		nouveau_event_put_locked(event, index, handler);
-	spin_unlock_irqrestore(&event->lock, flags);
-}
-
-void
-nouveau_event_get(struct nouveau_event *event, int index,
-		  struct nouveau_eventh *handler)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&event->lock, flags);
-	if (index < event->index_nr) {
-		list_add(&handler->head, &event->index[index].list);
-		if (!event->index[index].refs++) {
-			if (event->enable)
-				event->enable(event, index);
-		}
-	}
-	spin_unlock_irqrestore(&event->lock, flags);
-}
-
-void
-nouveau_event_trigger(struct nouveau_event *event, int index)
-{
-	struct nouveau_eventh *handler, *temp;
-	unsigned long flags;
-
-	if (index >= event->index_nr)
-		return;
-
-	spin_lock_irqsave(&event->lock, flags);
-	list_for_each_entry_safe(handler, temp, &event->index[index].list, head) {
-		if (handler->func(handler, index) == NVKM_EVENT_DROP) {
-			nouveau_event_put_locked(event, index, handler);
-		}
-	}
-	spin_unlock_irqrestore(&event->lock, flags);
+void
+nouveau_event_put(struct nouveau_eventh *handler)
+{
+	struct nouveau_event *event = handler->event;
+	unsigned long flags;
+	if (__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
+		spin_lock_irqsave(&event->refs_lock, flags);
+		if (!--event->index[handler->index].refs) {
+			if (event->disable)
+				event->disable(event, handler->index);
+		}
+		spin_unlock_irqrestore(&event->refs_lock, flags);
+	}
+}
+
+void
+nouveau_event_get(struct nouveau_eventh *handler)
+{
+	struct nouveau_event *event = handler->event;
+	unsigned long flags;
+	if (!__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
+		spin_lock_irqsave(&event->refs_lock, flags);
+		if (!event->index[handler->index].refs++) {
+			if (event->enable)
+				event->enable(event, handler->index);
+		}
+		spin_unlock_irqrestore(&event->refs_lock, flags);
+	}
+}
+
+static void
+nouveau_event_fini(struct nouveau_eventh *handler)
+{
+	struct nouveau_event *event = handler->event;
+	unsigned long flags;
+	nouveau_event_put(handler);
+	spin_lock_irqsave(&event->list_lock, flags);
+	list_del(&handler->head);
+	spin_unlock_irqrestore(&event->list_lock, flags);
+}
+
+static int
+nouveau_event_init(struct nouveau_event *event, int index,
+		   int (*func)(void *, int), void *priv,
+		   struct nouveau_eventh *handler)
+{
+	unsigned long flags;
+
+	if (index >= event->index_nr)
+		return -EINVAL;
+
+	handler->event = event;
+	handler->flags = 0;
+	handler->index = index;
+	handler->func = func;
+	handler->priv = priv;
+
+	spin_lock_irqsave(&event->list_lock, flags);
+	list_add_tail(&handler->head, &event->index[index].list);
+	spin_unlock_irqrestore(&event->list_lock, flags);
+	return 0;
+}
+
+int
+nouveau_event_new(struct nouveau_event *event, int index,
+		  int (*func)(void *, int), void *priv,
+		  struct nouveau_eventh **phandler)
+{
+	struct nouveau_eventh *handler;
+	int ret = -ENOMEM;
+
+	handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL);
+	if (handler) {
+		ret = nouveau_event_init(event, index, func, priv, handler);
+		if (ret)
+			kfree(handler);
+	}
+
+	return ret;
+}
+
+void
+nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref)
+{
+	BUG_ON(handler != NULL);
+	if (*ref) {
+		nouveau_event_fini(*ref);
+		kfree(*ref);
+	}
+	*ref = handler;
+}
+
+void
+nouveau_event_trigger(struct nouveau_event *event, int index)
+{
+	struct nouveau_eventh *handler;
+	unsigned long flags;
+
+	if (WARN_ON(index >= event->index_nr))
+		return;
+
+	spin_lock_irqsave(&event->list_lock, flags);
+	list_for_each_entry(handler, &event->index[index].list, head) {
+		if (test_bit(NVKM_EVENT_ENABLE, &handler->flags) &&
+		    handler->func(handler->priv, index) == NVKM_EVENT_DROP)
+			nouveau_event_put(handler);
+	}
+	spin_unlock_irqrestore(&event->list_lock, flags);
 }
 
 void
@@ -102,7 +154,8 @@ nouveau_event_create(int index_nr, struct nouveau_event **pevent)
 	if (!event)
 		return -ENOMEM;
 
-	spin_lock_init(&event->lock);
+	spin_lock_init(&event->list_lock);
+	spin_lock_init(&event->refs_lock);
 	for (i = 0; i < index_nr; i++)
 		INIT_LIST_HEAD(&event->index[i].list);
 	event->index_nr = index_nr;
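
The event.c rework inverts the old interface: rather than passing an (event, index) pair to get/put on every call, a handler is allocated and bound once with nouveau_event_new() and then enabled and disabled through itself, with list membership and enable refcounts guarded by the separate list_lock/refs_lock spinlocks. The resulting lifecycle, sketched from the functions in the hunk (the vblank source is just an example):

	struct nouveau_eventh *handler = NULL;
	int ret;

	/* allocate and register on the event's handler list (still disabled) */
	ret = nouveau_event_new(disp->vblank, head, my_func, my_priv, &handler);

	nouveau_event_get(handler);	/* enable; first user turns the source on  */
	/* nouveau_event_trigger() now calls my_func(my_priv, head) ...          */
	nouveau_event_put(handler);	/* disable; last user turns the source off */

	nouveau_event_ref(NULL, &handler);	/* unregister and free */

Note that nouveau_event_ref() as written only supports dropping a reference: it BUG()s when passed a non-NULL handler.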
diff --git a/drivers/gpu/drm/nouveau/core/core/option.c b/drivers/gpu/drm/nouveau/core/core/option.c
index 62a432ea39e5..9f6fcc5f66c2 100644
--- a/drivers/gpu/drm/nouveau/core/core/option.c
+++ b/drivers/gpu/drm/nouveau/core/core/option.c
@@ -25,15 +25,6 @@
 #include <core/option.h>
 #include <core/debug.h>
 
-/* compares unterminated string 'str' with zero-terminated string 'cmp' */
-static inline int
-strncasecmpz(const char *str, const char *cmp, size_t len)
-{
-	if (strlen(cmp) != len)
-		return len;
-	return strncasecmp(str, cmp, len);
-}
-
 const char *
 nouveau_stropt(const char *optstr, const char *opt, int *arglen)
 {
@@ -105,7 +96,7 @@ nouveau_dbgopt(const char *optstr, const char *sub)
 		else if (!strncasecmpz(optstr, "warn", len))
 			level = NV_DBG_WARN;
 		else if (!strncasecmpz(optstr, "info", len))
-			level = NV_DBG_INFO;
+			level = NV_DBG_INFO_NORMAL;
 		else if (!strncasecmpz(optstr, "debug", len))
 			level = NV_DBG_DEBUG;
 		else if (!strncasecmpz(optstr, "trace", len))
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
index 52fb2aa129e8..03e0060b13da 100644
--- a/drivers/gpu/drm/nouveau/core/core/printk.c
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -27,16 +27,38 @@
 #include <core/subdev.h>
 #include <core/printk.h>
 
-int nv_printk_suspend_level = NV_DBG_DEBUG;
+int nv_info_debug_level = NV_DBG_INFO_NORMAL;
 
 void
-nv_printk_(struct nouveau_object *object, const char *pfx, int level,
-	   const char *fmt, ...)
+nv_printk_(struct nouveau_object *object, int level, const char *fmt, ...)
 {
 	static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
+	const char *pfx;
 	char mfmt[256];
 	va_list args;
 
+	switch (level) {
+	case NV_DBG_FATAL:
+		pfx = KERN_CRIT;
+		break;
+	case NV_DBG_ERROR:
+		pfx = KERN_ERR;
+		break;
+	case NV_DBG_WARN:
+		pfx = KERN_WARNING;
+		break;
+	case NV_DBG_INFO_NORMAL:
+		pfx = KERN_INFO;
+		break;
+	case NV_DBG_DEBUG:
+	case NV_DBG_PARANOIA:
+	case NV_DBG_TRACE:
+	case NV_DBG_SPAM:
+	default:
+		pfx = KERN_DEBUG;
+		break;
+	}
+
 	if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
 		struct nouveau_object *device = object;
 		struct nouveau_object *subdev = object;
@@ -74,20 +96,3 @@ nv_printk_(struct nouveau_object *object, const char *pfx, int level,
 	vprintk(mfmt, args);
 	va_end(args);
 }
-
-#define CONV_LEVEL(x) case NV_DBG_##x: return NV_PRINTK_##x
-
-const char *nv_printk_level_to_pfx(int level)
-{
-	switch (level) {
-	CONV_LEVEL(FATAL);
-	CONV_LEVEL(ERROR);
-	CONV_LEVEL(WARN);
-	CONV_LEVEL(INFO);
-	CONV_LEVEL(DEBUG);
-	CONV_LEVEL(PARANOIA);
-	CONV_LEVEL(TRACE);
-	CONV_LEVEL(SPAM);
-	}
-	return NV_PRINTK_DEBUG;
-}
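
With nv_printk_level_to_pfx() removed, nv_printk_() now derives the kernel printk prefix directly from the NV_DBG_* level, so callers pass only a level. Assuming the usual wrapper macros from core/printk.h (which expand to nv_printk_(object, NV_DBG_<LEVEL>, fmt, ...)), messages land roughly as in this sketch:

	nv_error(priv, "link training failed\n");	/* NV_DBG_ERROR       -> KERN_ERR     */
	nv_warn(priv, "unknown dcb entry\n");		/* NV_DBG_WARN        -> KERN_WARNING */
	nv_info(priv, "crystal: %d KHz\n", freq);	/* NV_DBG_INFO_NORMAL -> KERN_INFO    */
	nv_debug(priv, "ctor\n");			/* NV_DBG_DEBUG       -> KERN_DEBUG   */

The new nv_info_debug_level variable (defaulting to NV_DBG_INFO_NORMAL) is what lets info-class output be remapped to a quieter or louder level, for example while suspending; the rest of that plumbing lives in the headers and callers, not this hunk.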
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 4c72571655ad..9135b25a29d0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -29,7 +29,7 @@
 
 #include <core/class.h>
 
-#include <engine/device.h>
+#include "priv.h"
 
 static DEFINE_MUTEX(nv_devices_mutex);
 static LIST_HEAD(nv_devices);
@@ -75,7 +75,9 @@ static const u64 disable_map[] = {
 	[NVDEV_SUBDEV_BAR]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_VOLT]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_THERM]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_PWR]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_ENGINE_DMAOBJ]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_ENGINE_PERFMON]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_DISABLE_FIFO,
 	[NVDEV_ENGINE_SW]	= NV_DEVICE_DISABLE_FIFO,
 	[NVDEV_ENGINE_GR]	= NV_DEVICE_DISABLE_GRAPH,
@@ -87,7 +89,7 @@ static const u64 disable_map[] = {
 	[NVDEV_ENGINE_PPP]	= NV_DEVICE_DISABLE_PPP,
 	[NVDEV_ENGINE_COPY0]	= NV_DEVICE_DISABLE_COPY0,
 	[NVDEV_ENGINE_COPY1]	= NV_DEVICE_DISABLE_COPY1,
-	[NVDEV_ENGINE_UNK1C1]	= NV_DEVICE_DISABLE_UNK1C1,
+	[NVDEV_ENGINE_VIC]	= NV_DEVICE_DISABLE_VIC,
 	[NVDEV_ENGINE_VENC]	= NV_DEVICE_DISABLE_VENC,
 	[NVDEV_ENGINE_DISP]	= NV_DEVICE_DISABLE_DISP,
 	[NVDEV_SUBDEV_NR]	= 0,
@@ -119,10 +121,12 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
 		return -ENODEV;
 	}
 
-	ret = nouveau_parent_create(parent, nv_object(device), oclass, 0, NULL,
+	ret = nouveau_parent_create(parent, nv_object(device), oclass, 0,
+				    nouveau_control_oclass,
 				    (1ULL << NVDEV_ENGINE_DMAOBJ) |
 				    (1ULL << NVDEV_ENGINE_FIFO) |
-				    (1ULL << NVDEV_ENGINE_DISP), &devobj);
+				    (1ULL << NVDEV_ENGINE_DISP) |
+				    (1ULL << NVDEV_ENGINE_PERFMON), &devobj);
 	*pobject = nv_object(devobj);
 	if (ret)
 		return ret;
@@ -158,22 +162,29 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
 	iounmap(map);
 
 	/* determine chipset and derive architecture from it */
-	if ((boot0 & 0x0f000000) > 0) {
-		device->chipset = (boot0 & 0xff00000) >> 20;
-		switch (device->chipset & 0xf0) {
-		case 0x10: device->card_type = NV_10; break;
-		case 0x20: device->card_type = NV_20; break;
-		case 0x30: device->card_type = NV_30; break;
-		case 0x40:
-		case 0x60: device->card_type = NV_40; break;
-		case 0x50:
-		case 0x80:
-		case 0x90:
-		case 0xa0: device->card_type = NV_50; break;
-		case 0xc0: device->card_type = NV_C0; break;
-		case 0xd0: device->card_type = NV_D0; break;
-		case 0xe0:
-		case 0xf0: device->card_type = NV_E0; break;
+	if ((boot0 & 0x1f000000) > 0) {
+		device->chipset = (boot0 & 0x1ff00000) >> 20;
+		switch (device->chipset & 0x1f0) {
+		case 0x010: {
+			if (0x461 & (1 << (device->chipset & 0xf)))
+				device->card_type = NV_10;
+			else
+				device->card_type = NV_11;
+			break;
+		}
+		case 0x020: device->card_type = NV_20; break;
+		case 0x030: device->card_type = NV_30; break;
+		case 0x040:
+		case 0x060: device->card_type = NV_40; break;
+		case 0x050:
+		case 0x080:
+		case 0x090:
+		case 0x0a0: device->card_type = NV_50; break;
+		case 0x0c0: device->card_type = NV_C0; break;
+		case 0x0d0: device->card_type = NV_D0; break;
+		case 0x0e0:
+		case 0x0f0:
+		case 0x100: device->card_type = NV_E0; break;
 		default:
 			break;
 		}
@@ -188,7 +199,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
 
 	switch (device->card_type) {
 	case NV_04: ret = nv04_identify(device); break;
-	case NV_10: ret = nv10_identify(device); break;
+	case NV_10:
+	case NV_11: ret = nv10_identify(device); break;
 	case NV_20: ret = nv20_identify(device); break;
 	case NV_30: ret = nv30_identify(device); break;
 	case NV_40: ret = nv40_identify(device); break;
@@ -212,7 +224,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
 	nv_info(device, "Family : NV%02X\n", device->card_type);
 
 	/* determine frequency of timing crystal */
-	if ( device->chipset < 0x17 ||
+	if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
 	    (device->chipset >= 0x20 && device->chipset < 0x25))
 		strap &= 0x00000040;
 	else
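
Two things change in the chipset probe above: the masks widen from 0x0f000000/0x0ff00000 to 0x1f000000/0x1ff00000 so chipset ids above 0xff (such as 0x108, handled by the new 0x100 case) decode correctly, and the 0x01x family is split into NV_10 and NV_11 card types by the 0x461 bitmask. Decoded:

	/* 0x461 == 0b0100_0110_0001: bits 0, 5, 6 and 10 are set, so for
	 * chipset 0x1n:
	 *   n in {0x0, 0x5, 0x6, 0xa} -> NV_10  (NV10/NV15/NV16/NV1A)
	 *   all other n               -> NV_11  (e.g. NV11/NV17/NV18/NV1F)
	 */
	bool is_nv10_class = 0x461 & (1 << (device->chipset & 0xf));

Both card types still route through nv10_identify(), per the switch further down in the hunk.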
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
new file mode 100644
index 000000000000..4b69bf56ed01
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include <subdev/clock.h>
+
+#include "priv.h"
+
+static int
+nouveau_control_mthd_pstate_info(struct nouveau_object *object, u32 mthd,
+				 void *data, u32 size)
+{
+	struct nouveau_clock *clk = nouveau_clock(object);
+	struct nv_control_pstate_info *args = data;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	if (clk) {
+		args->count  = clk->state_nr;
+		args->ustate = clk->ustate;
+		args->pstate = clk->pstate;
+	} else {
+		args->count  = 0;
+		args->ustate = NV_CONTROL_PSTATE_INFO_USTATE_DISABLE;
+		args->pstate = NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN;
+	}
+
+	return 0;
+}
+
+static int
+nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
+				 void *data, u32 size)
+{
+	struct nouveau_clock *clk = nouveau_clock(object);
+	struct nv_control_pstate_attr *args = data;
+	struct nouveau_clocks *domain;
+	struct nouveau_pstate *pstate;
+	struct nouveau_cstate *cstate;
+	int i = 0, j = -1;
+	u32 lo, hi;
+
+	if ((size < sizeof(*args)) || !clk ||
+	    (args->state >= 0 && args->state >= clk->state_nr))
+		return -EINVAL;
+	domain = clk->domains;
+
+	while (domain->name != nv_clk_src_max) {
+		if (domain->mname && ++j == args->index)
+			break;
+		domain++;
+	}
+
+	if (domain->name == nv_clk_src_max)
+		return -EINVAL;
+
+	if (args->state != NV_CONTROL_PSTATE_ATTR_STATE_CURRENT) {
+		list_for_each_entry(pstate, &clk->states, head) {
+			if (i++ == args->state)
+				break;
+		}
+
+		lo = pstate->base.domain[domain->name];
+		hi = lo;
+		list_for_each_entry(cstate, &pstate->list, head) {
+			lo = min(lo, cstate->domain[domain->name]);
+			hi = max(hi, cstate->domain[domain->name]);
+		}
+
+		args->state = pstate->pstate;
+	} else {
+		lo = max(clk->read(clk, domain->name), 0);
+		hi = lo;
+	}
+
+	snprintf(args->name, sizeof(args->name), "%s", domain->mname);
+	snprintf(args->unit, sizeof(args->unit), "MHz");
+	args->min = lo / domain->mdiv;
+	args->max = hi / domain->mdiv;
+
+	args->index = 0;
+	while ((++domain)->name != nv_clk_src_max) {
+		if (domain->mname) {
+			args->index = ++j;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+nouveau_control_mthd_pstate_user(struct nouveau_object *object, u32 mthd,
+				 void *data, u32 size)
+{
+	struct nouveau_clock *clk = nouveau_clock(object);
+	struct nv_control_pstate_user *args = data;
+
+	if (size < sizeof(*args) || !clk)
+		return -EINVAL;
+
+	return nouveau_clock_ustate(clk, args->state);
+}
+
+struct nouveau_oclass
+nouveau_control_oclass[] = {
+	{ .handle = NV_CONTROL_CLASS,
+	  .ofuncs = &nouveau_object_ofuncs,
+	  .omthds = (struct nouveau_omthds[]) {
+		{ NV_CONTROL_PSTATE_INFO,
+		  NV_CONTROL_PSTATE_INFO, nouveau_control_mthd_pstate_info },
+		{ NV_CONTROL_PSTATE_ATTR,
+		  NV_CONTROL_PSTATE_ATTR, nouveau_control_mthd_pstate_attr },
+		{ NV_CONTROL_PSTATE_USER,
+		  NV_CONTROL_PSTATE_USER, nouveau_control_mthd_pstate_user },
+		{},
+	  },
+	},
+	{}
+};
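
The new control class exposes performance-state introspection and selection as object methods dispatched through the omthds table at the bottom of the file. A rough usage sketch for a caller that has instantiated an NV_CONTROL_CLASS object (nv_exec() is assumed here as the core's method-call helper; the in-tree consumer is the drm side's new sysfs code):

	struct nv_control_pstate_info info = {};
	int ret;

	/* how many perf states exist, plus current and user-requested state */
	ret = nv_exec(ctrl, NV_CONTROL_PSTATE_INFO, &info, sizeof(info));

	if (ret == 0 && info.count) {
		/* ask the clock subdev for a specific performance state */
		struct nv_control_pstate_user args = { .state = 0 };
		ret = nv_exec(ctrl, NV_CONTROL_PSTATE_USER, &args, sizeof(args));
	}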
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index a0284cf09c0f..dbd2dde7b7e7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -50,15 +50,15 @@ nv04_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nv04_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv04_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv04_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv04_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv04_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -68,15 +68,15 @@ nv04_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nv04_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv04_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv04_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv04_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv04_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index 1b7809a095c3..6e03dd6abeea 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -52,10 +52,10 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -69,15 +69,15 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -88,15 +88,15 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -107,15 +107,15 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nv1a_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -126,15 +126,15 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -145,15 +145,15 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -164,15 +164,15 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nv1a_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -183,15 +183,15 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index 12a4005fa619..dcde53b9f07f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -53,15 +53,15 @@ nv20_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nv20_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv20_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -72,15 +72,15 @@ nv20_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -91,15 +91,15 @@ nv20_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -110,15 +110,15 @@ nv20_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv2a_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index cef0f1ea4c21..7b8662ef4f59 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -53,15 +53,15 @@ nv30_identify(struct nouveau_device *device)
53 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 53 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
54 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 54 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
55 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 55 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
56 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 56 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
57 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 57 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
58 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 58 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
59 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; 59 device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
60 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 60 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
61 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 61 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
63 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; 63 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
64 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 64 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
65 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; 65 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
66 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 66 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
67 break; 67 break;
@@ -72,15 +72,15 @@ nv30_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv35_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv35_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -91,15 +91,15 @@ nv30_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
@@ -111,15 +111,15 @@ nv30_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv36_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv36_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
@@ -131,15 +131,15 @@ nv30_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv34_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 1719cb0ee595..c8c41e93695e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -35,6 +35,7 @@
 #include <subdev/fb.h>
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
+#include <subdev/volt.h>
 
 #include <engine/device.h>
 #include <engine/dmaobj.h>
@@ -43,6 +44,7 @@
 #include <engine/graph.h>
 #include <engine/mpeg.h>
 #include <engine/disp.h>
+#include <engine/perfmon.h>
 
 int
 nv40_identify(struct nouveau_device *device)
@@ -56,18 +58,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x41:
 		device->cname = "NV41";
@@ -77,18 +81,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x42:
 		device->cname = "NV42";
@@ -98,18 +104,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x43:
 		device->cname = "NV43";
@@ -119,18 +127,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x45:
 		device->cname = "NV45";
@@ -140,18 +150,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x47:
 		device->cname = "G70";
@@ -161,18 +173,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv47_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv47_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x49:
 		device->cname = "G71";
@@ -182,18 +196,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x4b:
 		device->cname = "G73";
@@ -203,18 +219,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x44:
 		device->cname = "NV44";
@@ -224,18 +242,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x46:
 		device->cname = "G72";
@@ -245,18 +265,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x4a:
 		device->cname = "NV44A";
@@ -266,18 +288,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x4c:
 		device->cname = "C61";
@@ -287,18 +311,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x4e:
 		device->cname = "C51";
@@ -308,18 +334,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv4e_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv4e_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x63:
 		device->cname = "C73";
@@ -329,18 +357,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x67:
 		device->cname = "C67";
@@ -350,18 +380,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x68:
 		device->cname = "C68";
@@ -371,18 +403,20 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Curie chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index ffc18b80c5d9..db139827047c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -36,6 +36,8 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 #include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
 
 #include <engine/device.h>
 #include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
 #include <engine/ppp.h>
 #include <engine/copy.h>
 #include <engine/disp.h>
+#include <engine/perfmon.h>
 
 int
 nv50_identify(struct nouveau_device *device)
@@ -59,257 +62,277 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv50_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv50_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv50_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv50_perfmon_oclass;
 		break;
 	case 0x84:
 		device->cname = "G84";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x86:
 		device->cname = "G86";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x92:
 		device->cname = "G92";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x94:
 		device->cname = "G94";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv94_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x96:
 		device->cname = "G96";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv94_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x98:
 		device->cname = "G98";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
216 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 231 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
217 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 232 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
218 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 233 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
219 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 234 device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
220 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 235 device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
221 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 236 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
222 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 237 device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
223 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 238 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
224 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 239 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
225 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 240 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
241 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
226 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 242 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
227 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 243 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
228 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 244 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
229 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 245 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
230 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass; 246 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
231 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 247 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
232 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 248 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
233 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 249 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
234 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; 250 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
251 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
235 break; 252 break;
236 case 0xa0: 253 case 0xa0:
237 device->cname = "G200"; 254 device->cname = "G200";
238 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 255 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
239 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 256 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
240 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; 257 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
241 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 258 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
242 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 259 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
243 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 260 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
244 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 261 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
245 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 262 device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
246 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 263 device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
247 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 264 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
248 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 265 device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
249 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 266 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
250 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 267 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
251 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 268 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
269 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
252 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 270 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
253 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 271 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
254 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 272 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
255 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 273 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
256 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; 274 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
257 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 275 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
258 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 276 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
259 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 277 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
260 device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass; 278 device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass;
279 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
261 break; 280 break;
262 case 0xaa: 281 case 0xaa:
263 device->cname = "MCP77/MCP78"; 282 device->cname = "MCP77/MCP78";
264 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 283 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
265 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 284 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
266 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 285 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
267 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 286 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
268 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 287 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
269 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 288 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
270 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 289 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
271 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 290 device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
272 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 291 device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
273 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 292 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
274 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 293 device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass;
275 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 294 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
276 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 295 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
277 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 296 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
297 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
278 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 298 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
279 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 299 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
280 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 300 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
281 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 301 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
282 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass; 302 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
283 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 303 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
284 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 304 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
285 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 305 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
286 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; 306 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
307 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
287 break; 308 break;
288 case 0xac: 309 case 0xac:
289 device->cname = "MCP79/MCP7A"; 310 device->cname = "MCP79/MCP7A";
290 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 311 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
291 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 312 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
292 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 313 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
293 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 314 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
294 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 315 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
295 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 316 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
296 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 317 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
297 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 318 device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
298 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 319 device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
299 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 320 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
300 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 321 device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass;
301 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 322 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
302 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 323 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
303 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 324 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
325 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
304 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 326 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
305 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 327 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
306 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 328 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
307 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 329 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
308 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass; 330 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
309 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 331 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
310 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 332 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
311 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 333 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
312 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; 334 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
335 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
313 break; 336 break;
314 case 0xa3: 337 case 0xa3:
315 device->cname = "GT215"; 338 device->cname = "GT215";
@@ -320,16 +343,18 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nva3_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
@@ -337,6 +362,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
 		break;
 	case 0xa5:
 		device->cname = "GT216";
@@ -347,22 +373,25 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nva3_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
 		break;
 	case 0xa8:
 		device->cname = "GT218";
@@ -373,22 +402,25 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nva3_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
 		break;
 	case 0xaf:
 		device->cname = "MCP89";
@@ -399,22 +431,25 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nvaf_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Tesla chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 418f51f50d7a..8d06eef2b9ee 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -38,6 +38,8 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 #include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
 
 #include <engine/device.h>
 #include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
 #include <engine/ppp.h>
 #include <engine/copy.h>
 #include <engine/disp.h>
+#include <engine/perfmon.h>
 
 int
 nvc0_identify(struct nouveau_device *device)
@@ -63,18 +66,20 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nvc0_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -82,6 +87,7 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xc4:
 		device->cname = "GF104";
@@ -92,18 +98,20 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nvc3_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -111,6 +119,7 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xc3:
 		device->cname = "GF106";
@@ -121,24 +130,27 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nvc3_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xce:
 		device->cname = "GF114";
@@ -149,18 +161,20 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nvc3_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -168,6 +182,7 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xcf:
 		device->cname = "GF116";
@@ -178,18 +193,20 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nvc3_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -197,6 +214,7 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xc1:
 		device->cname = "GF108";
@@ -207,24 +225,27 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nvc1_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xc8:
 		device->cname = "GF110";
@@ -235,18 +256,20 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nvc8_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -254,6 +277,7 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xd9:
 		device->cname = "GF119";
@@ -264,24 +288,27 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nvd9_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xd7:
 		device->cname = "GF117";
@@ -292,24 +319,25 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nvd7_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Fermi chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 7aca1877add4..3900104976fc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -38,6 +38,8 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 #include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
 
 #include <engine/device.h>
 #include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
 #include <engine/bsp.h>
 #include <engine/vp.h>
 #include <engine/ppp.h>
+#include <engine/perfmon.h>
 
 int
 nve0_identify(struct nouveau_device *device)
@@ -59,22 +62,24 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nve4_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
@@ -83,28 +88,31 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
 		break;
 	case 0xe7:
 		device->cname = "GK107";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = nve4_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
@@ -113,28 +121,31 @@ nve0_identify(struct nouveau_device *device)
113 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; 121 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
114 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; 122 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
115 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 123 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
124 device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
116 break; 125 break;
117 case 0xe6: 126 case 0xe6:
118 device->cname = "GK106"; 127 device->cname = "GK106";
119 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 128 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
120 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass; 129 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
121 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass; 130 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
122 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; 131 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
123 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 132 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
124 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 133 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
125 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 134 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
126 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 135 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
127 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 136 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
128 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 137 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
129 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 138 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
130 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 139 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
131 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 140 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
132 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 141 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
133 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 142 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
134 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 143 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
144 device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
145 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
135 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 146 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
136 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; 147 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
137 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 148 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
138 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass; 149 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
139 device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass; 150 device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
140 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; 151 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
@@ -143,28 +154,31 @@ nve0_identify(struct nouveau_device *device)
143 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; 154 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
144 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; 155 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
145 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 156 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
157 device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
146 break; 158 break;
147 case 0xf0: 159 case 0xf0:
148 device->cname = "GK110"; 160 device->cname = "GK110";
149 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 161 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
150 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass; 162 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
151 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass; 163 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
152 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; 164 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
153 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 165 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
154 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 166 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
155 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 167 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
156 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 168 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
157 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 169 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
158 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 170 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
159 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 171 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
160 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 172 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
161 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 173 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
162 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 174 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
163 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 175 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
164 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 176 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
177 device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
178 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
165 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 179 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
166 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; 180 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
167 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 181 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
168 device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass; 182 device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
169 device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass; 183 device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass;
170 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; 184 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
@@ -175,6 +189,43 @@ nve0_identify(struct nouveau_device *device)
175 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; 189 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
176 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 190 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
177#endif 191#endif
192 device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
193 break;
194 case 0x108:
195 device->cname = "GK208";
196 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
197 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
198 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
199 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
200 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
201 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
202 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
203 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
204 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
205 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
206 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
207 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
208 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
209 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
210 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
211 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
212 device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass;
213 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
214 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
215#if 0
216 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
217 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
218 device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
219#endif
220 device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass;
221#if 0
222 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
223 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
224 device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
225 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
226 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
227 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
228#endif
178 break; 229 break;
179 default: 230 default:
180 nv_fatal(device, "unknown Kepler chipset\n"); 231 nv_fatal(device, "unknown Kepler chipset\n");
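
The hunks above mix two assignment styles in the per-chipset dispatch table: classes still exported as plain structs are taken by address (&nvc0_ltcg_oclass), while classes this series converts to exported pointers (nvc3_mc_oclass, nve0_fifo_oclass, ...) are stored directly; see the fifo hunks later in this diff for the definition side. A minimal sketch of the distinction, with hypothetical stand-in names:

    /* Sketch of the two export styles mixed in the table above; the
     * names are stand-ins, not real nouveau symbols. */
    struct nouveau_oclass_sketch { int handle; };

    extern struct nouveau_oclass_sketch  plain_oclass;   /* exported object  */
    extern struct nouveau_oclass_sketch *ptr_oclass;     /* exported pointer */

    void fill(struct nouveau_oclass_sketch *table[])
    {
        table[0] = &plain_oclass;   /* '&' required */
        table[1] = ptr_oclass;      /* stored as-is */
    }
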
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/priv.h b/drivers/gpu/drm/nouveau/core/engine/device/priv.h
new file mode 100644
index 000000000000..035fd5b9cfc3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/priv.h
@@ -0,0 +1,8 @@
1#ifndef __NVKM_DEVICE_PRIV_H__
2#define __NVKM_DEVICE_PRIV_H__
3
4#include <engine/device.h>
5
6extern struct nouveau_oclass nouveau_control_oclass[];
7
8#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 054d9cff4f53..1bd4c63369c1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -70,17 +70,10 @@ dp_set_link_config(struct dp_state *dp)
70 }; 70 };
71 u32 lnkcmp; 71 u32 lnkcmp;
72 u8 sink[2]; 72 u8 sink[2];
73 int ret;
73 74
74 DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); 75 DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
75 76
76 /* set desired link configuration on the sink */
77 sink[0] = dp->link_bw / 27000;
78 sink[1] = dp->link_nr;
79 if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
80 sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
81
82 nv_wraux(dp->aux, DPCD_LC00, sink, 2);
83
84 /* set desired link configuration on the source */ 77 /* set desired link configuration on the source */
85 if ((lnkcmp = dp->info.lnkcmp)) { 78 if ((lnkcmp = dp->info.lnkcmp)) {
86 if (dp->version < 0x30) { 79 if (dp->version < 0x30) {
@@ -96,10 +89,22 @@ dp_set_link_config(struct dp_state *dp)
96 nvbios_exec(&init); 89 nvbios_exec(&init);
97 } 90 }
98 91
99 return dp->func->lnk_ctl(dp->disp, dp->outp, dp->head, 92 ret = dp->func->lnk_ctl(dp->disp, dp->outp, dp->head,
100 dp->link_nr, dp->link_bw / 27000, 93 dp->link_nr, dp->link_bw / 27000,
101 dp->dpcd[DPCD_RC02] & 94 dp->dpcd[DPCD_RC02] &
102 DPCD_RC02_ENHANCED_FRAME_CAP); 95 DPCD_RC02_ENHANCED_FRAME_CAP);
96 if (ret) {
97 ERR("lnk_ctl failed with %d\n", ret);
98 return ret;
99 }
100
101 /* set desired link configuration on the sink */
102 sink[0] = dp->link_bw / 27000;
103 sink[1] = dp->link_nr;
104 if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
105 sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
106
107 return nv_wraux(dp->aux, DPCD_LC00, sink, 2);
103} 108}
104 109
105static void 110static void
@@ -294,8 +299,17 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
294 299
295 ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd)); 300 ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd));
296 if (ret) { 301 if (ret) {
302 /* it's possible the display has been unplugged before we
303 * get here. we still need to execute the full set of
304 * vbios scripts, and program the OR at a high enough
305 * frequency to satisfy the target mode. failure to do
306 * so results at best in an UPDATE hanging, and at worst
307 * with PDISP running away to join the circus.
308 */
309 dp->dpcd[1] = link_bw[0] / 27000;
310 dp->dpcd[2] = 4;
311 dp->dpcd[3] = 0x00;
297 ERR("failed to read DPCD\n"); 312 ERR("failed to read DPCD\n");
298 return ret;
299 } 313 }
300 314
301 /* adjust required bandwidth for 8B/10B coding overhead */ 315 /* adjust required bandwidth for 8B/10B coding overhead */
@@ -308,7 +322,7 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
308 while (*link_bw > (dp->dpcd[1] * 27000)) 322 while (*link_bw > (dp->dpcd[1] * 27000))
309 link_bw++; 323 link_bw++;
310 324
311 while (link_bw[0]) { 325 while ((ret = -EIO) && link_bw[0]) {
312 /* find minimum required lane count at this link rate */ 326 /* find minimum required lane count at this link rate */
313 dp->link_nr = dp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT; 327 dp->link_nr = dp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT;
314 while ((dp->link_nr >> 1) * link_bw[0] > datarate) 328 while ((dp->link_nr >> 1) * link_bw[0] > datarate)
@@ -328,8 +342,10 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
328 !dp_link_train_eq(dp)) 342 !dp_link_train_eq(dp))
329 break; 343 break;
330 } else 344 } else
331 if (ret >= 1) { 345 if (ret) {
332 /* dp_set_link_config() handled training */ 346 /* dp_set_link_config() handled training, or
347 * we failed to communicate with the sink.
348 */
333 break; 349 break;
334 } 350 }
335 351
@@ -339,8 +355,10 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
339 355
340 /* finish link training */ 356 /* finish link training */
341 dp_set_training_pattern(dp, 0); 357 dp_set_training_pattern(dp, 0);
358 if (ret < 0)
359 ERR("link training failed\n");
342 360
343 /* execute post-train script from vbios */ 361 /* execute post-train script from vbios */
344 dp_link_train_fini(dp); 362 dp_link_train_fini(dp);
345 return true; 363 return (ret < 0) ? false : true;
346} 364}
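
Three related changes are visible in the dport.c hunks above: the source is now programmed before any DPCD write reaches the sink, with lnk_ctl failures propagated instead of ignored; a failed DPCD read falls back to faked capabilities (4 lanes at the highest requested rate) so the vbios scripts still run; and the training loop seeds ret to -EIO each pass, so exhausting every link rate reports failure. A minimal sketch of the reordered link-config flow, with stand-in names rather than driver API:

    #include <stdio.h>

    struct dp_sketch { int link_nr, link_bw; };

    static int source_lnk_ctl(struct dp_sketch *dp) { (void)dp; return 0; }
    static int sink_write_lc(struct dp_sketch *dp)  { (void)dp; return 0; }

    static int
    set_link_config_sketch(struct dp_sketch *dp)
    {
        int ret = source_lnk_ctl(dp);   /* source side first */
        if (ret) {
            fprintf(stderr, "lnk_ctl failed with %d\n", ret);
            return ret;                 /* sink left untouched */
        }
        return sink_write_lc(dp);       /* DPCD_LC00/LC01 written last */
    }
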
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index 05e903f08a36..a0bc8a89b699 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -59,6 +59,7 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
59 struct nv04_disp_priv *priv = (void *)subdev; 59 struct nv04_disp_priv *priv = (void *)subdev;
60 u32 crtc0 = nv_rd32(priv, 0x600100); 60 u32 crtc0 = nv_rd32(priv, 0x600100);
61 u32 crtc1 = nv_rd32(priv, 0x602100); 61 u32 crtc1 = nv_rd32(priv, 0x602100);
62 u32 pvideo;
62 63
63 if (crtc0 & 0x00000001) { 64 if (crtc0 & 0x00000001) {
64 nouveau_event_trigger(priv->base.vblank, 0); 65 nouveau_event_trigger(priv->base.vblank, 0);
@@ -69,6 +70,14 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
69 nouveau_event_trigger(priv->base.vblank, 1); 70 nouveau_event_trigger(priv->base.vblank, 1);
70 nv_wr32(priv, 0x602100, 0x00000001); 71 nv_wr32(priv, 0x602100, 0x00000001);
71 } 72 }
73
74 if (nv_device(priv)->chipset >= 0x10 &&
75 nv_device(priv)->chipset <= 0x40) {
76 pvideo = nv_rd32(priv, 0x8100);
77 if (pvideo & ~0x11)
78 nv_info(priv, "PVIDEO intr: %08x\n", pvideo);
79 nv_wr32(priv, 0x8100, pvideo);
80 }
72} 81}
73 82
74static int 83static int
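
The new PVIDEO handling above follows a common read/report/ack shape: read the status register, log anything beyond the two expected notify bits, then write the value back. A stand-in sketch (reg_read/reg_write substitute for nv_rd32/nv_wr32; the write-back ack assumes the usual write-one-to-clear semantics):

    #include <stdio.h>

    extern unsigned int reg_read(unsigned int addr);
    extern void reg_write(unsigned int addr, unsigned int val);

    static void
    pvideo_intr_sketch(void)
    {
        unsigned int pvideo = reg_read(0x8100);

        if (pvideo & ~0x11u)    /* anything beyond the two notify bits */
            printf("PVIDEO intr: %08x\n", pvideo);

        reg_write(0x8100, pvideo);   /* acknowledge what we saw */
    }
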
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 52dd7a1db729..378a015091d2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -541,6 +541,15 @@ nvd0_disp_base_init(struct nouveau_object *object)
541 nv_wr32(priv, 0x6100a0, 0x00000000); 541 nv_wr32(priv, 0x6100a0, 0x00000000);
542 nv_wr32(priv, 0x6100b0, 0x00000307); 542 nv_wr32(priv, 0x6100b0, 0x00000307);
543 543
544 /* disable underflow reporting, preventing an intermittent issue
545 * on some nve4 boards where the production vbios left this
546 * setting enabled by default.
547 *
548 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
549 */
550 for (i = 0; i < priv->head.nr; i++)
551 nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
552
544 return 0; 553 return 0;
545} 554}
546 555
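
nv_mask() is a read-modify-write; the loop added above clears bits 0x101 and sets bit 0x010 in each head's 0x616308 register, per the linked NVIDIA note. Sketch with stand-in accessors:

    extern unsigned int reg_read(unsigned int addr);
    extern void reg_write(unsigned int addr, unsigned int val);

    static unsigned int
    mask_sketch(unsigned int addr, unsigned int mask, unsigned int data)
    {
        unsigned int old = reg_read(addr);
        reg_write(addr, (old & ~mask) | data);   /* read-modify-write */
        return old;
    }

    static void
    disable_underflow_sketch(int nr_heads)
    {
        for (int i = 0; i < nr_heads; i++)
            mask_sketch(0x616308 + i * 0x800, 0x00000111, 0x00000010);
    }
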
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
index 7ec4ee83fb64..eea3ef59693d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -97,8 +97,9 @@ nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
97{ 97{
98 struct nouveau_bios *bios = nouveau_bios(disp); 98 struct nouveau_bios *bios = nouveau_bios(disp);
99 struct nv50_disp_priv *priv = (void *)disp; 99 struct nv50_disp_priv *priv = (void *)disp;
100 const u32 shift = nv94_sor_dp_lane_map(priv, lane);
100 const u32 loff = nv94_sor_loff(outp); 101 const u32 loff = nv94_sor_loff(outp);
101 u32 addr, shift = nv94_sor_dp_lane_map(priv, lane); 102 u32 addr, data[3];
102 u8 ver, hdr, cnt, len; 103 u8 ver, hdr, cnt, len;
103 struct nvbios_dpout info; 104 struct nvbios_dpout info;
104 struct nvbios_dpcfg ocfg; 105 struct nvbios_dpcfg ocfg;
@@ -113,9 +114,12 @@ nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
113 if (!addr) 114 if (!addr)
114 return -EINVAL; 115 return -EINVAL;
115 116
116 nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift); 117 data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
117 nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift); 118 data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
118 nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8); 119 data[2] = nv_rd32(priv, 0x61c130 + loff) & ~(0x0000ff00);
120 nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.drv << shift));
121 nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pre << shift));
122 nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.unk << 8));
119 return 0; 123 return 0;
120} 124}
121 125
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index 9e1d435d7282..d2df572f16a3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -93,8 +93,9 @@ nvd0_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
93{ 93{
94 struct nouveau_bios *bios = nouveau_bios(disp); 94 struct nouveau_bios *bios = nouveau_bios(disp);
95 struct nv50_disp_priv *priv = (void *)disp; 95 struct nv50_disp_priv *priv = (void *)disp;
96 const u32 shift = nvd0_sor_dp_lane_map(priv, lane);
96 const u32 loff = nvd0_sor_loff(outp); 97 const u32 loff = nvd0_sor_loff(outp);
97 u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane); 98 u32 addr, data[3];
98 u8 ver, hdr, cnt, len; 99 u8 ver, hdr, cnt, len;
99 struct nvbios_dpout info; 100 struct nvbios_dpout info;
100 struct nvbios_dpcfg ocfg; 101 struct nvbios_dpcfg ocfg;
@@ -109,9 +110,12 @@ nvd0_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
109 if (!addr) 110 if (!addr)
110 return -EINVAL; 111 return -EINVAL;
111 112
112 nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift); 113 data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
113 nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift); 114 data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
114 nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8); 115 data[2] = nv_rd32(priv, 0x61c130 + loff) & ~(0x0000ff00);
116 nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.drv << shift));
117 nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pre << shift));
118 nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.unk << 8));
115 nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000); 119 nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
116 return 0; 120 return 0;
117} 121}
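
Both sornv94.c and sornvd0.c above replace three interleaved nv_mask() calls with three reads followed by three writes, so the updated drive/pre-emphasis/misc values land back-to-back; that grouping rationale is an inference, since the hunks only show the mechanical change. Stand-in sketch:

    extern unsigned int reg_read(unsigned int addr);
    extern void reg_write(unsigned int addr, unsigned int val);

    static void
    dp_drv_ctl_sketch(unsigned int loff, unsigned int shift,
                      unsigned int drv, unsigned int pre, unsigned int unk)
    {
        unsigned int data[3];

        /* gather all three current values first ... */
        data[0] = reg_read(0x61c118 + loff) & ~(0x000000ffu << shift);
        data[1] = reg_read(0x61c120 + loff) & ~(0x000000ffu << shift);
        data[2] = reg_read(0x61c130 + loff) & ~0x0000ff00u;

        /* ... then issue the three writes together */
        reg_write(0x61c118 + loff, data[0] | (drv << shift));
        reg_write(0x61c120 + loff, data[1] | (pre << shift));
        reg_write(0x61c130 + loff, data[2] | (unk << 8));
    }
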
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index f877bd524a92..54f26cc801c7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -632,8 +632,8 @@ nv04_fifo_init(struct nouveau_object *object)
632 return 0; 632 return 0;
633} 633}
634 634
635struct nouveau_oclass 635struct nouveau_oclass *
636nv04_fifo_oclass = { 636nv04_fifo_oclass = &(struct nouveau_oclass) {
637 .handle = NV_ENGINE(FIFO, 0x04), 637 .handle = NV_ENGINE(FIFO, 0x04),
638 .ofuncs = &(struct nouveau_ofuncs) { 638 .ofuncs = &(struct nouveau_ofuncs) {
639 .ctor = nv04_fifo_ctor, 639 .ctor = nv04_fifo_ctor,
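
The conversion above (repeated for every fifo class below) turns an exported struct into an exported pointer initialised from a C99 compound literal. This is safe because at file scope a compound literal has static storage duration, so the pointer stays valid for the module's lifetime; inside a function the same literal would be automatic and its address would dangle. A standalone demo with hypothetical names:

    struct demo_oclass { int handle; };

    struct demo_oclass *fifo_demo_oclass =
        &(struct demo_oclass) { .handle = 0x04 };   /* static: fine */

    struct demo_oclass *
    broken_export(void)
    {
        return &(struct demo_oclass) { .handle = 0x04 };  /* dangles! */
    }
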
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 2c927c1d173b..571a22aa1ae5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -159,8 +159,8 @@ nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
159 return 0; 159 return 0;
160} 160}
161 161
162struct nouveau_oclass 162struct nouveau_oclass *
163nv10_fifo_oclass = { 163nv10_fifo_oclass = &(struct nouveau_oclass) {
164 .handle = NV_ENGINE(FIFO, 0x10), 164 .handle = NV_ENGINE(FIFO, 0x10),
165 .ofuncs = &(struct nouveau_ofuncs) { 165 .ofuncs = &(struct nouveau_ofuncs) {
166 .ctor = nv10_fifo_ctor, 166 .ctor = nv10_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index a9cb51d38c57..f25760209316 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -196,8 +196,8 @@ nv17_fifo_init(struct nouveau_object *object)
196 return 0; 196 return 0;
197} 197}
198 198
199struct nouveau_oclass 199struct nouveau_oclass *
200nv17_fifo_oclass = { 200nv17_fifo_oclass = &(struct nouveau_oclass) {
201 .handle = NV_ENGINE(FIFO, 0x17), 201 .handle = NV_ENGINE(FIFO, 0x17),
202 .ofuncs = &(struct nouveau_ofuncs) { 202 .ofuncs = &(struct nouveau_ofuncs) {
203 .ctor = nv17_fifo_ctor, 203 .ctor = nv17_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 5c7433d5069f..343487ed2238 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -337,8 +337,8 @@ nv40_fifo_init(struct nouveau_object *object)
337 return 0; 337 return 0;
338} 338}
339 339
340struct nouveau_oclass 340struct nouveau_oclass *
341nv40_fifo_oclass = { 341nv40_fifo_oclass = &(struct nouveau_oclass) {
342 .handle = NV_ENGINE(FIFO, 0x40), 342 .handle = NV_ENGINE(FIFO, 0x40),
343 .ofuncs = &(struct nouveau_ofuncs) { 343 .ofuncs = &(struct nouveau_ofuncs) {
344 .ctor = nv40_fifo_ctor, 344 .ctor = nv40_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 7e5dff51d3c5..5f555788121c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -502,8 +502,8 @@ nv50_fifo_init(struct nouveau_object *object)
502 return 0; 502 return 0;
503} 503}
504 504
505struct nouveau_oclass 505struct nouveau_oclass *
506nv50_fifo_oclass = { 506nv50_fifo_oclass = &(struct nouveau_oclass) {
507 .handle = NV_ENGINE(FIFO, 0x50), 507 .handle = NV_ENGINE(FIFO, 0x50),
508 .ofuncs = &(struct nouveau_ofuncs) { 508 .ofuncs = &(struct nouveau_ofuncs) {
509 .ctor = nv50_fifo_ctor, 509 .ctor = nv50_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 91a87cd7195a..0908dc834c84 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -144,7 +144,7 @@ nv84_fifo_object_attach(struct nouveau_object *parent,
144 case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break; 144 case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break;
145 case NVDEV_ENGINE_VP : context |= 0x00400000; break; 145 case NVDEV_ENGINE_VP : context |= 0x00400000; break;
146 case NVDEV_ENGINE_CRYPT : 146 case NVDEV_ENGINE_CRYPT :
147 case NVDEV_ENGINE_UNK1C1: context |= 0x00500000; break; 147 case NVDEV_ENGINE_VIC : context |= 0x00500000; break;
148 case NVDEV_ENGINE_BSP : context |= 0x00600000; break; 148 case NVDEV_ENGINE_BSP : context |= 0x00600000; break;
149 default: 149 default:
150 return -EINVAL; 150 return -EINVAL;
@@ -180,7 +180,7 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
180 (1ULL << NVDEV_ENGINE_BSP) | 180 (1ULL << NVDEV_ENGINE_BSP) |
181 (1ULL << NVDEV_ENGINE_PPP) | 181 (1ULL << NVDEV_ENGINE_PPP) |
182 (1ULL << NVDEV_ENGINE_COPY0) | 182 (1ULL << NVDEV_ENGINE_COPY0) |
183 (1ULL << NVDEV_ENGINE_UNK1C1), &chan); 183 (1ULL << NVDEV_ENGINE_VIC), &chan);
184 *pobject = nv_object(chan); 184 *pobject = nv_object(chan);
185 if (ret) 185 if (ret)
186 return ret; 186 return ret;
@@ -243,7 +243,7 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
243 (1ULL << NVDEV_ENGINE_BSP) | 243 (1ULL << NVDEV_ENGINE_BSP) |
244 (1ULL << NVDEV_ENGINE_PPP) | 244 (1ULL << NVDEV_ENGINE_PPP) |
245 (1ULL << NVDEV_ENGINE_COPY0) | 245 (1ULL << NVDEV_ENGINE_COPY0) |
246 (1ULL << NVDEV_ENGINE_UNK1C1), &chan); 246 (1ULL << NVDEV_ENGINE_VIC), &chan);
247 *pobject = nv_object(chan); 247 *pobject = nv_object(chan);
248 if (ret) 248 if (ret)
249 return ret; 249 return ret;
@@ -435,8 +435,8 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
435 return 0; 435 return 0;
436} 436}
437 437
438struct nouveau_oclass 438struct nouveau_oclass *
439nv84_fifo_oclass = { 439nv84_fifo_oclass = &(struct nouveau_oclass) {
440 .handle = NV_ENGINE(FIFO, 0x84), 440 .handle = NV_ENGINE(FIFO, 0x84),
441 .ofuncs = &(struct nouveau_ofuncs) { 441 .ofuncs = &(struct nouveau_ofuncs) {
442 .ctor = nv84_fifo_ctor, 442 .ctor = nv84_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index ce92f289e751..9ac94d4e5646 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -494,13 +494,6 @@ nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
494 u32 mthd = (addr & 0x00003ffc); 494 u32 mthd = (addr & 0x00003ffc);
495 u32 show = stat; 495 u32 show = stat;
496 496
497 if (stat & 0x00200000) {
498 if (mthd == 0x0054) {
499 if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
500 show &= ~0x00200000;
501 }
502 }
503
504 if (stat & 0x00800000) { 497 if (stat & 0x00800000) {
505 if (!nvc0_fifo_swmthd(priv, chid, mthd, data)) 498 if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
506 show &= ~0x00800000; 499 show &= ~0x00800000;
@@ -720,8 +713,8 @@ nvc0_fifo_init(struct nouveau_object *object)
720 return 0; 713 return 0;
721} 714}
722 715
723struct nouveau_oclass 716struct nouveau_oclass *
724nvc0_fifo_oclass = { 717nvc0_fifo_oclass = &(struct nouveau_oclass) {
725 .handle = NV_ENGINE(FIFO, 0xc0), 718 .handle = NV_ENGINE(FIFO, 0xc0),
726 .ofuncs = &(struct nouveau_ofuncs) { 719 .ofuncs = &(struct nouveau_ofuncs) {
727 .ctor = nvc0_fifo_ctor, 720 .ctor = nvc0_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 8e8121abe31b..04f412922d2d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -481,13 +481,6 @@ nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
481 u32 mthd = (addr & 0x00003ffc); 481 u32 mthd = (addr & 0x00003ffc);
482 u32 show = stat; 482 u32 show = stat;
483 483
484 if (stat & 0x00200000) {
485 if (mthd == 0x0054) {
486 if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
487 show &= ~0x00200000;
488 }
489 }
490
491 if (stat & 0x00800000) { 484 if (stat & 0x00800000) {
492 if (!nve0_fifo_swmthd(priv, chid, mthd, data)) 485 if (!nve0_fifo_swmthd(priv, chid, mthd, data))
493 show &= ~0x00800000; 486 show &= ~0x00800000;
@@ -675,8 +668,8 @@ nve0_fifo_init(struct nouveau_object *object)
675 return 0; 668 return 0;
676} 669}
677 670
678struct nouveau_oclass 671struct nouveau_oclass *
679nve0_fifo_oclass = { 672nve0_fifo_oclass = &(struct nouveau_oclass) {
680 .handle = NV_ENGINE(FIFO, 0xe0), 673 .handle = NV_ENGINE(FIFO, 0xe0),
681 .ofuncs = &(struct nouveau_ofuncs) { 674 .ofuncs = &(struct nouveau_ofuncs) {
682 .ctor = nve0_fifo_ctor, 675 .ctor = nve0_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index 64dca260912f..fe67415c3e17 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -1039,7 +1039,7 @@ nvc0_grctx_generate_r406800(struct nvc0_graph_priv *priv)
1039 } while (!tpcnr[gpc]); 1039 } while (!tpcnr[gpc]);
1040 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--; 1040 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
1041 1041
1042 tpc_set |= 1 << ((gpc * 8) + tpc); 1042 tpc_set |= 1ULL << ((gpc * 8) + tpc);
1043 } 1043 }
1044 1044
1045 nv_wr32(priv, 0x406800 + (i * 0x20), lower_32_bits(tpc_set)); 1045 nv_wr32(priv, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
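
Why the 1ULL matters in the hunk above: with a plain int constant the shift is performed in 32 bits, so once (gpc * 8) + tpc reaches 31 the result is undefined and the upper half of the 64-bit accumulator (later split with lower_32_bits()) could never be populated. Minimal demo:

    #include <stdint.h>

    uint64_t set_bit_buggy(unsigned n) { return 1 << n;    /* UB for n >= 31 */ }
    uint64_t set_bit_fixed(unsigned n) { return 1ULL << n; /* OK for n <= 63 */ }
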
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
index e5be3ee7f172..71b4283f7fad 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
@@ -587,6 +587,7 @@ nvc1_grctx_init_unk58xx[] = {
587 { 0x405870, 4, 0x04, 0x00000001 }, 587 { 0x405870, 4, 0x04, 0x00000001 },
588 { 0x405a00, 2, 0x04, 0x00000000 }, 588 { 0x405a00, 2, 0x04, 0x00000000 },
589 { 0x405a18, 1, 0x04, 0x00000000 }, 589 { 0x405a18, 1, 0x04, 0x00000000 },
590 {}
590}; 591};
591 592
592static struct nvc0_graph_init 593static struct nvc0_graph_init
@@ -598,6 +599,7 @@ nvc1_grctx_init_rop[] = {
598 { 0x408904, 1, 0x04, 0x62000001 }, 599 { 0x408904, 1, 0x04, 0x62000001 },
599 { 0x408908, 1, 0x04, 0x00c80929 }, 600 { 0x408908, 1, 0x04, 0x00c80929 },
600 { 0x408980, 1, 0x04, 0x0000011d }, 601 { 0x408980, 1, 0x04, 0x0000011d },
602 {}
601}; 603};
602 604
603static struct nvc0_graph_init 605static struct nvc0_graph_init
@@ -671,6 +673,7 @@ nvc1_grctx_init_gpc_0[] = {
671 { 0x419000, 1, 0x04, 0x00000780 }, 673 { 0x419000, 1, 0x04, 0x00000780 },
672 { 0x419004, 2, 0x04, 0x00000000 }, 674 { 0x419004, 2, 0x04, 0x00000000 },
673 { 0x419014, 1, 0x04, 0x00000004 }, 675 { 0x419014, 1, 0x04, 0x00000004 },
676 {}
674}; 677};
675 678
676static struct nvc0_graph_init 679static struct nvc0_graph_init
@@ -717,6 +720,7 @@ nvc1_grctx_init_tpc[] = {
717 { 0x419e98, 1, 0x04, 0x00000000 }, 720 { 0x419e98, 1, 0x04, 0x00000000 },
718 { 0x419ee0, 1, 0x04, 0x00011110 }, 721 { 0x419ee0, 1, 0x04, 0x00011110 },
719 { 0x419f30, 11, 0x04, 0x00000000 }, 722 { 0x419f30, 11, 0x04, 0x00000000 },
723 {}
720}; 724};
721 725
722void 726void
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
index 438e78410808..c4740d528532 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
@@ -258,6 +258,7 @@ nvd7_grctx_init_hub[] = {
258 nvc0_grctx_init_unk78xx, 258 nvc0_grctx_init_unk78xx,
259 nvc0_grctx_init_unk80xx, 259 nvc0_grctx_init_unk80xx,
260 nvd9_grctx_init_rop, 260 nvd9_grctx_init_rop,
261 NULL
261}; 262};
262 263
263struct nvc0_graph_init * 264struct nvc0_graph_init *
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
index 818a4751df46..a1102cbf2fdc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
@@ -466,6 +466,7 @@ nvd9_grctx_init_hub[] = {
466 nvc0_grctx_init_unk78xx, 466 nvc0_grctx_init_unk78xx,
467 nvc0_grctx_init_unk80xx, 467 nvc0_grctx_init_unk80xx,
468 nvd9_grctx_init_rop, 468 nvd9_grctx_init_rop,
469 NULL
469}; 470};
470 471
471struct nvc0_graph_init * 472struct nvc0_graph_init *
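
The {} entries added to the nvc1 init tables and the NULL entries added to the nvd7/nvd9 hub lists above are sentinels: walkers stop at an all-zero element instead of needing an explicit count. A sketch of both walkers, with a struct mirroring the four columns in the tables above and the assumption that address 0 never appears as a real entry:

    #include <stddef.h>

    struct graph_init_sketch { unsigned addr, count, pitch, data; };

    static void
    walk_init(const struct graph_init_sketch *init)
    {
        for (; init->addr; init++)      /* stops at the {} terminator */
            ;   /* program init->count registers starting at init->addr */
    }

    static void
    walk_hub(const struct graph_init_sketch * const *chunk)
    {
        for (; *chunk; chunk++)         /* stops at the NULL terminator */
            walk_init(*chunk);
    }
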
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 23c143aaa556..4532f7e5618c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -945,7 +945,8 @@ nv10_graph_load_context(struct nv10_graph_chan *chan, int chid)
945 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) 945 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
946 nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]); 946 nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]);
947 947
948 if (nv_device(priv)->chipset >= 0x17) { 948 if (nv_device(priv)->card_type >= NV_11 &&
949 nv_device(priv)->chipset >= 0x17) {
949 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) 950 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
950 nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]); 951 nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]);
951 } 952 }
@@ -970,7 +971,8 @@ nv10_graph_unload_context(struct nv10_graph_chan *chan)
970 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) 971 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
971 chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]); 972 chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]);
972 973
973 if (nv_device(priv)->chipset >= 0x17) { 974 if (nv_device(priv)->card_type >= NV_11 &&
975 nv_device(priv)->chipset >= 0x17) {
974 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) 976 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
975 chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]); 977 chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]);
976 } 978 }
@@ -1052,7 +1054,8 @@ nv10_graph_context_ctor(struct nouveau_object *parent,
1052 NV_WRITE_CTX(0x00400e14, 0x00001000); 1054 NV_WRITE_CTX(0x00400e14, 0x00001000);
1053 NV_WRITE_CTX(0x00400e30, 0x00080008); 1055 NV_WRITE_CTX(0x00400e30, 0x00080008);
1054 NV_WRITE_CTX(0x00400e34, 0x00080008); 1056 NV_WRITE_CTX(0x00400e34, 0x00080008);
1055 if (nv_device(priv)->chipset >= 0x17) { 1057 if (nv_device(priv)->card_type >= NV_11 &&
1058 nv_device(priv)->chipset >= 0x17) {
1056 /* is it really needed ??? */ 1059 /* is it really needed ??? */
1057 NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, 1060 NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
1058 nv_rd32(priv, NV10_PGRAPH_DEBUG_4)); 1061 nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
@@ -1231,7 +1234,7 @@ nv10_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1231 nv_engine(priv)->sclass = nv10_graph_sclass; 1234 nv_engine(priv)->sclass = nv10_graph_sclass;
1232 else 1235 else
1233 if (nv_device(priv)->chipset < 0x17 || 1236 if (nv_device(priv)->chipset < 0x17 ||
1234 nv_device(priv)->chipset == 0x1a) 1237 nv_device(priv)->card_type < NV_11)
1235 nv_engine(priv)->sclass = nv15_graph_sclass; 1238 nv_engine(priv)->sclass = nv15_graph_sclass;
1236 else 1239 else
1237 nv_engine(priv)->sclass = nv17_graph_sclass; 1240 nv_engine(priv)->sclass = nv17_graph_sclass;
@@ -1270,7 +1273,8 @@ nv10_graph_init(struct nouveau_object *object)
1270 nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9); 1273 nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
1271 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31)); 1274 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
1272 1275
1273 if (nv_device(priv)->chipset >= 0x17) { 1276 if (nv_device(priv)->card_type >= NV_11 &&
1277 nv_device(priv)->chipset >= 0x17) {
1274 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000); 1278 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
1275 nv_wr32(priv, 0x400a10, 0x03ff3fb6); 1279 nv_wr32(priv, 0x400a10, 0x03ff3fb6);
1276 nv_wr32(priv, 0x400838, 0x002f8684); 1280 nv_wr32(priv, 0x400838, 0x002f8684);
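
The repeated predicate above reads as one helper: nv17-era context state only applies to NV11-family cards and later. Judging by the sclass hunk, this classifies the old chipset == 0x1a special case below NV_11, which a bare chipset compare would otherwise have matched. Sketch with stand-in types:

    struct dev_sketch { int card_type, chipset; };
    enum { NV_11_SKETCH = 0x11 };   /* stand-in for the driver's NV_11 */

    static int
    has_nv17_ctx(const struct dev_sketch *d)
    {
        return d->card_type >= NV_11_SKETCH && d->chipset >= 0x17;
    }
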
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 3f4f35cc3848..434bb4b0fa2e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -1138,7 +1138,7 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1138 if (ret) 1138 if (ret)
1139 return ret; 1139 return ret;
1140 1140
1141 nv_subdev(priv)->unit = 0x18001000; 1141 nv_subdev(priv)->unit = 0x08001000;
1142 nv_subdev(priv)->intr = nvc0_graph_intr; 1142 nv_subdev(priv)->intr = nvc0_graph_intr;
1143 1143
1144 priv->base.units = nvc0_graph_units; 1144 priv->base.units = nvc0_graph_units;
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index c19004301309..7eb6d94c84e2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -34,16 +34,7 @@
34 34
35#include <engine/fifo.h> 35#include <engine/fifo.h>
36#include <engine/mpeg.h> 36#include <engine/mpeg.h>
37#include <engine/graph/nv40.h> 37#include <engine/mpeg/nv31.h>
38
39struct nv31_mpeg_priv {
40 struct nouveau_mpeg base;
41 atomic_t refcount;
42};
43
44struct nv31_mpeg_chan {
45 struct nouveau_object base;
46};
47 38
48/******************************************************************************* 39/*******************************************************************************
49 * MPEG object classes 40 * MPEG object classes
@@ -89,18 +80,18 @@ nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
89 80
90 if (mthd == 0x0190) { 81 if (mthd == 0x0190) {
91 /* DMA_CMD */ 82 /* DMA_CMD */
92 nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000)); 83 nv_mask(priv, 0x00b300, 0x00010000, (dma0 & 0x00030000) ? 0x00010000 : 0);
93 nv_wr32(priv, 0x00b334, base); 84 nv_wr32(priv, 0x00b334, base);
94 nv_wr32(priv, 0x00b324, size); 85 nv_wr32(priv, 0x00b324, size);
95 } else 86 } else
96 if (mthd == 0x01a0) { 87 if (mthd == 0x01a0) {
97 /* DMA_DATA */ 88 /* DMA_DATA */
98 nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2); 89 nv_mask(priv, 0x00b300, 0x00020000, (dma0 & 0x00030000) ? 0x00020000 : 0);
99 nv_wr32(priv, 0x00b360, base); 90 nv_wr32(priv, 0x00b360, base);
100 nv_wr32(priv, 0x00b364, size); 91 nv_wr32(priv, 0x00b364, size);
101 } else { 92 } else {
102 /* DMA_IMAGE, VRAM only */ 93 /* DMA_IMAGE, VRAM only */
103 if (dma0 & 0x000c0000) 94 if (dma0 & 0x00030000)
104 return -EINVAL; 95 return -EINVAL;
105 96
106 nv_wr32(priv, 0x00b370, base); 97 nv_wr32(priv, 0x00b370, base);
@@ -110,7 +101,7 @@ nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
110 return 0; 101 return 0;
111} 102}
112 103
113static struct nouveau_ofuncs 104struct nouveau_ofuncs
114nv31_mpeg_ofuncs = { 105nv31_mpeg_ofuncs = {
115 .ctor = nv31_mpeg_object_ctor, 106 .ctor = nv31_mpeg_object_ctor,
116 .dtor = _nouveau_gpuobj_dtor, 107 .dtor = _nouveau_gpuobj_dtor,
@@ -146,16 +137,23 @@ nv31_mpeg_context_ctor(struct nouveau_object *parent,
146{ 137{
147 struct nv31_mpeg_priv *priv = (void *)engine; 138 struct nv31_mpeg_priv *priv = (void *)engine;
148 struct nv31_mpeg_chan *chan; 139 struct nv31_mpeg_chan *chan;
140 unsigned long flags;
149 int ret; 141 int ret;
150 142
151 if (!atomic_add_unless(&priv->refcount, 1, 1))
152 return -EBUSY;
153
154 ret = nouveau_object_create(parent, engine, oclass, 0, &chan); 143 ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
155 *pobject = nv_object(chan); 144 *pobject = nv_object(chan);
156 if (ret) 145 if (ret)
157 return ret; 146 return ret;
158 147
148 spin_lock_irqsave(&nv_engine(priv)->lock, flags);
149 if (priv->chan) {
150 spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
151 nouveau_object_destroy(&chan->base);
152 *pobject = NULL;
153 return -EBUSY;
154 }
155 priv->chan = chan;
156 spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
159 return 0; 157 return 0;
160} 158}
161 159
@@ -164,11 +162,15 @@ nv31_mpeg_context_dtor(struct nouveau_object *object)
164{ 162{
165 struct nv31_mpeg_priv *priv = (void *)object->engine; 163 struct nv31_mpeg_priv *priv = (void *)object->engine;
166 struct nv31_mpeg_chan *chan = (void *)object; 164 struct nv31_mpeg_chan *chan = (void *)object;
167 atomic_dec(&priv->refcount); 165 unsigned long flags;
166
167 spin_lock_irqsave(&nv_engine(priv)->lock, flags);
168 priv->chan = NULL;
169 spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
168 nouveau_object_destroy(&chan->base); 170 nouveau_object_destroy(&chan->base);
169} 171}
170 172
171static struct nouveau_oclass 173struct nouveau_oclass
172nv31_mpeg_cclass = { 174nv31_mpeg_cclass = {
173 .handle = NV_ENGCTX(MPEG, 0x31), 175 .handle = NV_ENGCTX(MPEG, 0x31),
174 .ofuncs = &(struct nouveau_ofuncs) { 176 .ofuncs = &(struct nouveau_ofuncs) {
@@ -197,21 +199,19 @@ nv31_mpeg_tile_prog(struct nouveau_engine *engine, int i)
197void 199void
198nv31_mpeg_intr(struct nouveau_subdev *subdev) 200nv31_mpeg_intr(struct nouveau_subdev *subdev)
199{ 201{
202 struct nv31_mpeg_priv *priv = (void *)subdev;
200 struct nouveau_fifo *pfifo = nouveau_fifo(subdev); 203 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
201 struct nouveau_engine *engine = nv_engine(subdev);
202 struct nouveau_object *engctx;
203 struct nouveau_handle *handle; 204 struct nouveau_handle *handle;
204 struct nv31_mpeg_priv *priv = (void *)subdev; 205 struct nouveau_object *engctx;
205 u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
206 u32 stat = nv_rd32(priv, 0x00b100); 206 u32 stat = nv_rd32(priv, 0x00b100);
207 u32 type = nv_rd32(priv, 0x00b230); 207 u32 type = nv_rd32(priv, 0x00b230);
208 u32 mthd = nv_rd32(priv, 0x00b234); 208 u32 mthd = nv_rd32(priv, 0x00b234);
209 u32 data = nv_rd32(priv, 0x00b238); 209 u32 data = nv_rd32(priv, 0x00b238);
210 u32 show = stat; 210 u32 show = stat;
211 int chid; 211 unsigned long flags;
212 212
213 engctx = nouveau_engctx_get(engine, inst); 213 spin_lock_irqsave(&nv_engine(priv)->lock, flags);
214 chid = pfifo->chid(pfifo, engctx); 214 engctx = nv_object(priv->chan);
215 215
216 if (stat & 0x01000000) { 216 if (stat & 0x01000000) {
217 /* happens on initial binding of the object */ 217 /* happens on initial binding of the object */
@@ -220,7 +220,7 @@ nv31_mpeg_intr(struct nouveau_subdev *subdev)
220 show &= ~0x01000000; 220 show &= ~0x01000000;
221 } 221 }
222 222
223 if (type == 0x00000010) { 223 if (type == 0x00000010 && engctx) {
224 handle = nouveau_handle_get_class(engctx, 0x3174); 224 handle = nouveau_handle_get_class(engctx, 0x3174);
225 if (handle && !nv_call(handle->object, mthd, data)) 225 if (handle && !nv_call(handle->object, mthd, data))
226 show &= ~0x01000000; 226 show &= ~0x01000000;
@@ -232,13 +232,12 @@ nv31_mpeg_intr(struct nouveau_subdev *subdev)
232 nv_wr32(priv, 0x00b230, 0x00000001); 232 nv_wr32(priv, 0x00b230, 0x00000001);
233 233
234 if (show) { 234 if (show) {
235 nv_error(priv, 235 nv_error(priv, "ch %d [%s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
236 "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n", 236 pfifo->chid(pfifo, engctx),
237 chid, inst << 4, nouveau_client_name(engctx), stat, 237 nouveau_client_name(engctx), stat, type, mthd, data);
238 type, mthd, data);
239 } 238 }
240 239
241 nouveau_engctx_put(engctx); 240 spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
242} 241}
243 242
244static int 243static int
@@ -284,10 +283,7 @@ nv31_mpeg_init(struct nouveau_object *object)
284 /* PMPEG init */ 283 /* PMPEG init */
285 nv_wr32(priv, 0x00b32c, 0x00000000); 284 nv_wr32(priv, 0x00b32c, 0x00000000);
286 nv_wr32(priv, 0x00b314, 0x00000100); 285 nv_wr32(priv, 0x00b314, 0x00000100);
287 if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv)) 286 nv_wr32(priv, 0x00b220, 0x00000031);
288 nv_wr32(priv, 0x00b220, 0x00000044);
289 else
290 nv_wr32(priv, 0x00b220, 0x00000031);
291 nv_wr32(priv, 0x00b300, 0x02001ec1); 287 nv_wr32(priv, 0x00b300, 0x02001ec1);
292 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); 288 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
293 289
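
The nv31.c hunks above replace the atomic_add_unless() refcount with a NULL-checked channel pointer guarded by the engine lock, which also lets the interrupt handler take the same lock and find the one active context safely. A sketch of the single-owner scheme (lock()/unlock() stand in for spin_lock_irqsave()/spin_unlock_irqrestore() on nv_engine(priv)->lock):

    #include <errno.h>
    #include <stddef.h>

    struct chan_sketch { int id; };

    static struct chan_sketch *owner;   /* priv->chan in the driver */

    extern void lock(void);
    extern void unlock(void);

    static int
    claim(struct chan_sketch *chan)
    {
        int ret = 0;

        lock();
        if (owner)
            ret = -EBUSY;   /* someone already holds the engine */
        else
            owner = chan;
        unlock();
        return ret;
    }

    static void
    release(void)
    {
        lock();
        owner = NULL;
        unlock();
    }
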
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h
new file mode 100644
index 000000000000..d08629d0b6ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h
@@ -0,0 +1,15 @@
1#ifndef __NV31_MPEG_H__
2#define __NV31_MPEG_H__
3
4#include <engine/mpeg.h>
5
6struct nv31_mpeg_chan {
7 struct nouveau_object base;
8};
9
10struct nv31_mpeg_priv {
11 struct nouveau_mpeg base;
12 struct nv31_mpeg_chan *chan;
13};
14
15#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index dd6196072e9c..d4e7ec0ba68c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -31,66 +31,63 @@
31#include <subdev/instmem.h> 31#include <subdev/instmem.h>
32 32
33#include <engine/mpeg.h> 33#include <engine/mpeg.h>
34#include <engine/graph/nv40.h> 34#include <engine/mpeg/nv31.h>
35
36struct nv40_mpeg_priv {
37 struct nouveau_mpeg base;
38};
39
40struct nv40_mpeg_chan {
41 struct nouveau_mpeg_chan base;
42};
43 35
44/******************************************************************************* 36/*******************************************************************************
45 * PMPEG context 37 * MPEG object classes
46 ******************************************************************************/ 38 ******************************************************************************/
47 39
48static int 40static int
49nv40_mpeg_context_ctor(struct nouveau_object *parent, 41nv40_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
50 struct nouveau_object *engine,
51 struct nouveau_oclass *oclass, void *data, u32 size,
52 struct nouveau_object **pobject)
53{ 42{
54 struct nv40_mpeg_chan *chan; 43 struct nouveau_instmem *imem = nouveau_instmem(object);
55 int ret; 44 struct nv31_mpeg_priv *priv = (void *)object->engine;
56 45 u32 inst = *(u32 *)arg << 4;
57 ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL, 46 u32 dma0 = nv_ro32(imem, inst + 0);
58 264 * 4, 16, 47 u32 dma1 = nv_ro32(imem, inst + 4);
59 NVOBJ_FLAG_ZERO_ALLOC, &chan); 48 u32 dma2 = nv_ro32(imem, inst + 8);
60 *pobject = nv_object(chan); 49 u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
61 if (ret) 50 u32 size = dma1 + 1;
62 return ret; 51
52 /* only allow linear DMA objects */
53 if (!(dma0 & 0x00002000))
54 return -EINVAL;
55
56 if (mthd == 0x0190) {
57 /* DMA_CMD */
58 nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
59 nv_wr32(priv, 0x00b334, base);
60 nv_wr32(priv, 0x00b324, size);
61 } else
62 if (mthd == 0x01a0) {
63 /* DMA_DATA */
64 nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
65 nv_wr32(priv, 0x00b360, base);
66 nv_wr32(priv, 0x00b364, size);
67 } else {
68 /* DMA_IMAGE, VRAM only */
69 if (dma0 & 0x00030000)
70 return -EINVAL;
71
72 nv_wr32(priv, 0x00b370, base);
73 nv_wr32(priv, 0x00b374, size);
74 }
63 75
64 nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
65 return 0; 76 return 0;
66} 77}
67 78
68static int 79static struct nouveau_omthds
69nv40_mpeg_context_fini(struct nouveau_object *object, bool suspend) 80nv40_mpeg_omthds[] = {
70{ 81 { 0x0190, 0x0190, nv40_mpeg_mthd_dma },
71 82 { 0x01a0, 0x01a0, nv40_mpeg_mthd_dma },
72 struct nv40_mpeg_priv *priv = (void *)object->engine; 83 { 0x01b0, 0x01b0, nv40_mpeg_mthd_dma },
73 struct nv40_mpeg_chan *chan = (void *)object; 84 {}
74 u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4; 85};
75
76 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
77 if (nv_rd32(priv, 0x00b318) == inst)
78 nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
79 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
80 return 0;
81}
82 86
83static struct nouveau_oclass 87struct nouveau_oclass
84nv40_mpeg_cclass = { 88nv40_mpeg_sclass[] = {
85 .handle = NV_ENGCTX(MPEG, 0x40), 89 { 0x3174, &nv31_mpeg_ofuncs, nv40_mpeg_omthds },
86 .ofuncs = &(struct nouveau_ofuncs) { 90 {}
87 .ctor = nv40_mpeg_context_ctor,
88 .dtor = _nouveau_mpeg_context_dtor,
89 .init = _nouveau_mpeg_context_init,
90 .fini = nv40_mpeg_context_fini,
91 .rd32 = _nouveau_mpeg_context_rd32,
92 .wr32 = _nouveau_mpeg_context_wr32,
93 },
94}; 91};
95 92
96/******************************************************************************* 93/*******************************************************************************
@@ -100,7 +97,7 @@ nv40_mpeg_cclass = {
100static void 97static void
101nv40_mpeg_intr(struct nouveau_subdev *subdev) 98nv40_mpeg_intr(struct nouveau_subdev *subdev)
102{ 99{
103 struct nv40_mpeg_priv *priv = (void *)subdev; 100 struct nv31_mpeg_priv *priv = (void *)subdev;
104 u32 stat; 101 u32 stat;
105 102
106 if ((stat = nv_rd32(priv, 0x00b100))) 103 if ((stat = nv_rd32(priv, 0x00b100)))
@@ -117,7 +114,7 @@ nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
117 struct nouveau_oclass *oclass, void *data, u32 size, 114 struct nouveau_oclass *oclass, void *data, u32 size,
118 struct nouveau_object **pobject) 115 struct nouveau_object **pobject)
119{ 116{
120 struct nv40_mpeg_priv *priv; 117 struct nv31_mpeg_priv *priv;
121 int ret; 118 int ret;
122 119
123 ret = nouveau_mpeg_create(parent, engine, oclass, &priv); 120 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
@@ -127,8 +124,8 @@ nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
127 124
128 nv_subdev(priv)->unit = 0x00000002; 125 nv_subdev(priv)->unit = 0x00000002;
129 nv_subdev(priv)->intr = nv40_mpeg_intr; 126 nv_subdev(priv)->intr = nv40_mpeg_intr;
130 nv_engine(priv)->cclass = &nv40_mpeg_cclass; 127 nv_engine(priv)->cclass = &nv31_mpeg_cclass;
131 nv_engine(priv)->sclass = nv31_mpeg_sclass; 128 nv_engine(priv)->sclass = nv40_mpeg_sclass;
132 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog; 129 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
133 return 0; 130 return 0;
134} 131}
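
nv40_mpeg_omthds above is a method-handler table: each entry names an inclusive method range plus a handler, terminated by an empty sentinel. The dispatch loop below is an assumption for illustration, not the driver's actual lookup:

    #include <errno.h>

    struct omthds_sketch {
        unsigned int first, last;
        int (*handler)(unsigned int mthd, void *arg, unsigned int len);
    };

    static int
    dispatch(const struct omthds_sketch *m, unsigned int mthd,
             void *arg, unsigned int len)
    {
        for (; m->handler; m++) {
            if (mthd >= m->first && mthd <= m->last)
                return m->handler(mthd, arg, len);
        }
        return -EINVAL;     /* no handler covers this method */
    }
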
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
new file mode 100644
index 000000000000..3d8c2133e0e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
@@ -0,0 +1,194 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/client.h>
28#include <core/engctx.h>
29#include <core/handle.h>
30
31#include <subdev/fb.h>
32#include <subdev/timer.h>
33#include <subdev/instmem.h>
34
35#include <engine/fifo.h>
36#include <engine/mpeg.h>
37
38struct nv44_mpeg_priv {
39 struct nouveau_mpeg base;
40};
41
42struct nv44_mpeg_chan {
43 struct nouveau_mpeg_chan base;
44};
45
46/*******************************************************************************
47 * PMPEG context
48 ******************************************************************************/
49
50static int
51nv44_mpeg_context_ctor(struct nouveau_object *parent,
52 struct nouveau_object *engine,
53 struct nouveau_oclass *oclass, void *data, u32 size,
54 struct nouveau_object **pobject)
55{
56 struct nv44_mpeg_chan *chan;
57 int ret;
58
59 ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
60 264 * 4, 16,
61 NVOBJ_FLAG_ZERO_ALLOC, &chan);
62 *pobject = nv_object(chan);
63 if (ret)
64 return ret;
65
66 nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
67 return 0;
68}
69
70static int
71nv44_mpeg_context_fini(struct nouveau_object *object, bool suspend)
72{
73
74 struct nv44_mpeg_priv *priv = (void *)object->engine;
75 struct nv44_mpeg_chan *chan = (void *)object;
76 u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
77
78 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
79 if (nv_rd32(priv, 0x00b318) == inst)
80 nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
81 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
82 return 0;
83}
84
85static struct nouveau_oclass
86nv44_mpeg_cclass = {
87 .handle = NV_ENGCTX(MPEG, 0x44),
88 .ofuncs = &(struct nouveau_ofuncs) {
89 .ctor = nv44_mpeg_context_ctor,
90 .dtor = _nouveau_mpeg_context_dtor,
91 .init = _nouveau_mpeg_context_init,
92 .fini = nv44_mpeg_context_fini,
93 .rd32 = _nouveau_mpeg_context_rd32,
94 .wr32 = _nouveau_mpeg_context_wr32,
95 },
96};
97
98/*******************************************************************************
99 * PMPEG engine/subdev functions
100 ******************************************************************************/
101
102static void
103nv44_mpeg_intr(struct nouveau_subdev *subdev)
104{
105 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
106 struct nouveau_engine *engine = nv_engine(subdev);
107 struct nouveau_object *engctx;
108 struct nouveau_handle *handle;
109 struct nv44_mpeg_priv *priv = (void *)subdev;
110 u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
111 u32 stat = nv_rd32(priv, 0x00b100);
112 u32 type = nv_rd32(priv, 0x00b230);
113 u32 mthd = nv_rd32(priv, 0x00b234);
114 u32 data = nv_rd32(priv, 0x00b238);
115 u32 show = stat;
116 int chid;
117
118 engctx = nouveau_engctx_get(engine, inst);
119 chid = pfifo->chid(pfifo, engctx);
120
121 if (stat & 0x01000000) {
122 /* happens on initial binding of the object */
123 if (type == 0x00000020 && mthd == 0x0000) {
124 nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
125 show &= ~0x01000000;
126 }
127
128 if (type == 0x00000010) {
129 handle = nouveau_handle_get_class(engctx, 0x3174);
130 if (handle && !nv_call(handle->object, mthd, data))
131 show &= ~0x01000000;
132 nouveau_handle_put(handle);
133 }
134 }
135
136 nv_wr32(priv, 0x00b100, stat);
137 nv_wr32(priv, 0x00b230, 0x00000001);
138
139 if (show) {
140 nv_error(priv,
141 "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
142 chid, inst << 4, nouveau_client_name(engctx), stat,
143 type, mthd, data);
144 }
145
146 nouveau_engctx_put(engctx);
147}
148
149static void
150nv44_mpeg_me_intr(struct nouveau_subdev *subdev)
151{
152 struct nv44_mpeg_priv *priv = (void *)subdev;
153 u32 stat;
154
155 if ((stat = nv_rd32(priv, 0x00b100)))
156 nv44_mpeg_intr(subdev);
157
158 if ((stat = nv_rd32(priv, 0x00b800))) {
159 nv_error(priv, "PMSRCH 0x%08x\n", stat);
160 nv_wr32(priv, 0x00b800, stat);
161 }
162}
163
164static int
165nv44_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
166 struct nouveau_oclass *oclass, void *data, u32 size,
167 struct nouveau_object **pobject)
168{
169 struct nv44_mpeg_priv *priv;
170 int ret;
171
172 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
173 *pobject = nv_object(priv);
174 if (ret)
175 return ret;
176
177 nv_subdev(priv)->unit = 0x00000002;
178 nv_subdev(priv)->intr = nv44_mpeg_me_intr;
179 nv_engine(priv)->cclass = &nv44_mpeg_cclass;
180 nv_engine(priv)->sclass = nv40_mpeg_sclass;
181 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
182 return 0;
183}
184
185struct nouveau_oclass
186nv44_mpeg_oclass = {
187 .handle = NV_ENGINE(MPEG, 0x44),
188 .ofuncs = &(struct nouveau_ofuncs) {
189 .ctor = nv44_mpeg_ctor,
190 .dtor = _nouveau_mpeg_dtor,
191 .init = nv31_mpeg_init,
192 .fini = _nouveau_mpeg_fini,
193 },
194};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
new file mode 100644
index 000000000000..e9c5e51943ef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
@@ -0,0 +1,449 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/option.h>
26#include <core/class.h>
27
28#include <subdev/clock.h>
29
30#include "priv.h"
31
32#define QUAD_MASK 0x0f
33#define QUAD_FREE 0x01
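/* each domain has four counter slots; "quad" tracks the free slots as a
 * bitmask, QUAD_MASK meaning all four are available
 */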
34
35static struct nouveau_perfsig *
36nouveau_perfsig_find_(struct nouveau_perfdom *dom, const char *name, u32 size)
37{
38 char path[64];
39 int i;
40
41 if (name[0] != '/') {
42 for (i = 0; i < dom->signal_nr; i++) {
43 if ( dom->signal[i].name &&
44 !strncmp(name, dom->signal[i].name, size))
45 return &dom->signal[i];
46 }
47 } else {
48 for (i = 0; i < dom->signal_nr; i++) {
49 snprintf(path, sizeof(path), "/%s/%02x", dom->name, i);
50 if (!strncmp(name, path, size))
51 return &dom->signal[i];
52 }
53 }
54
55 return NULL;
56}
57
58struct nouveau_perfsig *
59nouveau_perfsig_find(struct nouveau_perfmon *ppm, const char *name, u32 size,
60 struct nouveau_perfdom **pdom)
61{
62 struct nouveau_perfdom *dom = *pdom;
63 struct nouveau_perfsig *sig;
64
65 if (dom == NULL) {
66 list_for_each_entry(dom, &ppm->domains, head) {
67 sig = nouveau_perfsig_find_(dom, name, size);
68 if (sig) {
69 *pdom = dom;
70 return sig;
71 }
72 }
73
74 return NULL;
75 }
76
77 return nouveau_perfsig_find_(dom, name, size);
78}
79
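/* Wrap a single named signal in a one-signal counter.  The logic op
 * 0xaaaa is, assuming the usual 4-input LUT encoding, the truth table
 * that passes source 0 straight through.
 */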
80struct nouveau_perfctr *
81nouveau_perfsig_wrap(struct nouveau_perfmon *ppm, const char *name,
82 struct nouveau_perfdom **pdom)
83{
84 struct nouveau_perfsig *sig;
85 struct nouveau_perfctr *ctr;
86
87 sig = nouveau_perfsig_find(ppm, name, strlen(name), pdom);
88 if (!sig)
89 return NULL;
90
91 ctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
92 if (ctr) {
93 ctr->signal[0] = sig;
94 ctr->logic_op = 0xaaaa;
95 }
96
97 return ctr;
98}
99
100/*******************************************************************************
101 * Perfmon object classes
102 ******************************************************************************/
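/* NV_PERFCTR_QUERY: iterate over every signal of every domain.  The
 * iterator packs the domain index into bits 24:31 of args->iter and
 * (signal index + 1) into bits 0:23; an iter of 0 starts the walk and
 * 0xffffffff marks its end.
 */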
103static int
104nouveau_perfctr_query(struct nouveau_object *object, u32 mthd,
105 void *data, u32 size)
106{
107 struct nouveau_device *device = nv_device(object);
108 struct nouveau_perfmon *ppm = (void *)object->engine;
109 struct nouveau_perfdom *dom = NULL, *chk;
110 struct nv_perfctr_query *args = data;
111 const bool all = nouveau_boolopt(device->cfgopt, "NvPmShowAll", false);
112 const bool raw = nouveau_boolopt(device->cfgopt, "NvPmUnnamed", all);
113 const char *name;
114 int tmp = 0, di, si;
115 char path[64];
116
117 if (size < sizeof(*args))
118 return -EINVAL;
119
120 di = (args->iter & 0xff000000) >> 24;
121 si = (args->iter & 0x00ffffff) - 1;
122
123 list_for_each_entry(chk, &ppm->domains, head) {
124 if (tmp++ == di) {
125 dom = chk;
126 break;
127 }
128 }
129
130 if (dom == NULL || si >= (int)dom->signal_nr)
131 return -EINVAL;
132
133 if (si >= 0) {
134 if (raw || !(name = dom->signal[si].name)) {
135 snprintf(path, sizeof(path), "/%s/%02x", dom->name, si);
136 name = path;
137 }
138
139 if (args->name)
140 strncpy(args->name, name, args->size);
141 args->size = strlen(name) + 1;
142 }
143
144 do {
145 while (++si < dom->signal_nr) {
146 if (all || dom->signal[si].name) {
147 args->iter = (di << 24) | ++si;
148 return 0;
149 }
150 }
151 si = -1;
152 di = di + 1;
153 dom = list_entry(dom->head.next, typeof(*dom), head);
154 } while (&dom->head != &ppm->domains);
155
156 args->iter = 0xffffffff;
157 return 0;
158}
159
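/* NV_PERFCTR_SAMPLE: for each domain, read back the batch of counters
 * programmed on the previous call, then assign up to four pending
 * counters to the freed slots and arm them for the next window.
 */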
160static int
161nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd,
162 void *data, u32 size)
163{
164 struct nouveau_perfmon *ppm = (void *)object->engine;
165 struct nouveau_perfctr *ctr, *tmp;
166 struct nouveau_perfdom *dom;
167 struct nv_perfctr_sample *args = data;
168
169 if (size < sizeof(*args))
170 return -EINVAL;
171 ppm->sequence++;
172
173 list_for_each_entry(dom, &ppm->domains, head) {
174 /* sample previous batch of counters */
175 if (dom->quad != QUAD_MASK) {
176 dom->func->next(ppm, dom);
177 tmp = NULL;
178 while (!list_empty(&dom->list)) {
179 ctr = list_first_entry(&dom->list,
180 typeof(*ctr), head);
181 if (ctr->slot < 0) break;
182 if ( tmp && tmp == ctr) break;
183 if (!tmp) tmp = ctr;
184 dom->func->read(ppm, dom, ctr);
185 ctr->slot = -1;
186 list_move_tail(&ctr->head, &dom->list);
187 }
188 }
189
190 dom->quad = QUAD_MASK;
191
192 /* setup next batch of counters for sampling */
193 list_for_each_entry(ctr, &dom->list, head) {
194 ctr->slot = ffs(dom->quad) - 1;
195 if (ctr->slot < 0)
196 break;
197 dom->quad &= ~(QUAD_FREE << ctr->slot);
198 dom->func->init(ppm, dom, ctr);
199 }
200
201 if (dom->quad != QUAD_MASK)
202 dom->func->next(ppm, dom);
203 }
204
205 return 0;
206}
207
208static int
209nouveau_perfctr_read(struct nouveau_object *object, u32 mthd,
210 void *data, u32 size)
211{
212 struct nouveau_perfctr *ctr = (void *)object;
213 struct nv_perfctr_read *args = data;
214
215 if (size < sizeof(*args))
216 return -EINVAL;
217 if (!ctr->clk)
218 return -EAGAIN;
219
220 args->clk = ctr->clk;
221 args->ctr = ctr->ctr;
222 return 0;
223}
224
225static void
226nouveau_perfctr_dtor(struct nouveau_object *object)
227{
228 struct nouveau_perfctr *ctr = (void *)object;
229 if (ctr->head.next)
230 list_del(&ctr->head);
231 nouveau_object_destroy(&ctr->base);
232}
233
234static int
235nouveau_perfctr_ctor(struct nouveau_object *parent,
236 struct nouveau_object *engine,
237 struct nouveau_oclass *oclass, void *data, u32 size,
238 struct nouveau_object **pobject)
239{
240 struct nouveau_perfmon *ppm = (void *)engine;
241 struct nouveau_perfdom *dom = NULL;
242 struct nouveau_perfsig *sig[4] = {};
243 struct nouveau_perfctr *ctr;
244 struct nv_perfctr_class *args = data;
245 int ret, i;
246
247 if (size < sizeof(*args))
248 return -EINVAL;
249
250 for (i = 0; i < ARRAY_SIZE(args->signal) && args->signal[i].name; i++) {
251 sig[i] = nouveau_perfsig_find(ppm, args->signal[i].name,
252 args->signal[i].size, &dom);
253 if (!sig[i])
254 return -EINVAL;
255 }
256
257 ret = nouveau_object_create(parent, engine, oclass, 0, &ctr);
258 *pobject = nv_object(ctr);
259 if (ret)
260 return ret;
261
262 ctr->slot = -1;
263 ctr->logic_op = args->logic_op;
264 ctr->signal[0] = sig[0];
265 ctr->signal[1] = sig[1];
266 ctr->signal[2] = sig[2];
267 ctr->signal[3] = sig[3];
268 if (dom)
269 list_add_tail(&ctr->head, &dom->list);
270 return 0;
271}
272
273static struct nouveau_ofuncs
274nouveau_perfctr_ofuncs = {
275 .ctor = nouveau_perfctr_ctor,
276 .dtor = nouveau_perfctr_dtor,
277 .init = nouveau_object_init,
278 .fini = nouveau_object_fini,
279};
280
281static struct nouveau_omthds
282nouveau_perfctr_omthds[] = {
283 { NV_PERFCTR_QUERY, NV_PERFCTR_QUERY, nouveau_perfctr_query },
284 { NV_PERFCTR_SAMPLE, NV_PERFCTR_SAMPLE, nouveau_perfctr_sample },
285 { NV_PERFCTR_READ, NV_PERFCTR_READ, nouveau_perfctr_read },
286 {}
287};
288
289struct nouveau_oclass
290nouveau_perfmon_sclass[] = {
291 { .handle = NV_PERFCTR_CLASS,
292 .ofuncs = &nouveau_perfctr_ofuncs,
293 .omthds = nouveau_perfctr_omthds,
294 },
295 {},
296};
297
298/*******************************************************************************
299 * PPM context
300 ******************************************************************************/
301static void
302nouveau_perfctx_dtor(struct nouveau_object *object)
303{
304 struct nouveau_perfmon *ppm = (void *)object->engine;
305 mutex_lock(&nv_subdev(ppm)->mutex);
306 ppm->context = NULL;
307 mutex_unlock(&nv_subdev(ppm)->mutex);
308}
309
310static int
311nouveau_perfctx_ctor(struct nouveau_object *parent,
312 struct nouveau_object *engine,
313 struct nouveau_oclass *oclass, void *data, u32 size,
314 struct nouveau_object **pobject)
315{
316 struct nouveau_perfmon *ppm = (void *)engine;
317 struct nouveau_perfctx *ctx;
318 int ret;
319
320 ret = nouveau_engctx_create(parent, engine, oclass, NULL,
321 0, 0, 0, &ctx);
322 *pobject = nv_object(ctx);
323 if (ret)
324 return ret;
325
326 mutex_lock(&nv_subdev(ppm)->mutex);
327 if (ppm->context == NULL)
328 ppm->context = ctx;
329 mutex_unlock(&nv_subdev(ppm)->mutex);
330
331 if (ctx != ppm->context)
332 return -EBUSY;
333
334 return 0;
335}
336
337struct nouveau_oclass
338nouveau_perfmon_cclass = {
339 .handle = NV_ENGCTX(PERFMON, 0x00),
340 .ofuncs = &(struct nouveau_ofuncs) {
341 .ctor = nouveau_perfctx_ctor,
342 .dtor = nouveau_perfctx_dtor,
343 .init = _nouveau_engctx_init,
344 .fini = _nouveau_engctx_fini,
345 },
346};
347
348/*******************************************************************************
349 * PPM engine/subdev functions
350 ******************************************************************************/
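/* Instantiate performance-counter domains from a static description:
 * "mask" selects which unit instances exist (zero means a single
 * instance), instance i lives at base + i * size_unit, and successive
 * domains within an instance are spaced size_domain apart.
 */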
351int
352nouveau_perfdom_new(struct nouveau_perfmon *ppm, const char *name, u32 mask,
353 u32 base, u32 size_unit, u32 size_domain,
354 const struct nouveau_specdom *spec)
355{
356 const struct nouveau_specdom *sdom;
357 const struct nouveau_specsig *ssig;
358 struct nouveau_perfdom *dom;
359 int i;
360
361 for (i = 0; i == 0 || mask; i++) {
362 u32 addr = base + (i * size_unit);
363 if (i && !(mask & (1 << i)))
364 continue;
365
366 sdom = spec;
367 while (sdom->signal_nr) {
368 dom = kzalloc(sizeof(*dom) + sdom->signal_nr *
369 sizeof(*dom->signal), GFP_KERNEL);
370 if (!dom)
371 return -ENOMEM;
372
373 if (mask) {
374 snprintf(dom->name, sizeof(dom->name),
375 "%s/%02x/%02x", name, i,
376 (int)(sdom - spec));
377 } else {
378 snprintf(dom->name, sizeof(dom->name),
379 "%s/%02x", name, (int)(sdom - spec));
380 }
381
382 list_add_tail(&dom->head, &ppm->domains);
383 INIT_LIST_HEAD(&dom->list);
384 dom->func = sdom->func;
385 dom->addr = addr;
386 dom->quad = QUAD_MASK;
387 dom->signal_nr = sdom->signal_nr;
388
389 ssig = (sdom++)->signal;
390 while (ssig->name) {
391 dom->signal[ssig->signal].name = ssig->name;
392 ssig++;
393 }
394
395 addr += size_domain;
396 }
397
398 mask &= ~(1 << i);
399 }
400
401 return 0;
402}
403
404int
405_nouveau_perfmon_fini(struct nouveau_object *object, bool suspend)
406{
407 struct nouveau_perfmon *ppm = (void *)object;
408 return nouveau_engine_fini(&ppm->base, suspend);
409}
410
411int
412_nouveau_perfmon_init(struct nouveau_object *object)
413{
414 struct nouveau_perfmon *ppm = (void *)object;
415 return nouveau_engine_init(&ppm->base);
416}
417
418void
419_nouveau_perfmon_dtor(struct nouveau_object *object)
420{
421 struct nouveau_perfmon *ppm = (void *)object;
422 struct nouveau_perfdom *dom, *tmp;
423
424 list_for_each_entry_safe(dom, tmp, &ppm->domains, head) {
425 list_del(&dom->head);
426 kfree(dom);
427 }
428
429 nouveau_engine_destroy(&ppm->base);
430}
431
432int
433nouveau_perfmon_create_(struct nouveau_object *parent,
434 struct nouveau_object *engine,
435 struct nouveau_oclass *oclass,
436 int length, void **pobject)
437{
438 struct nouveau_perfmon *ppm;
439 int ret;
440
441 ret = nouveau_engine_create_(parent, engine, oclass, true, "PPM",
442 "perfmon", length, pobject);
443 ppm = *pobject;
444 if (ret)
445 return ret;
446
447 INIT_LIST_HEAD(&ppm->domains);
448 return 0;
449}
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c
new file mode 100644
index 000000000000..50696cc7b7d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c
@@ -0,0 +1,109 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "priv.h"
26
27static void
28pwr_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
29 struct nouveau_perfctr *ctr)
30{
31 u32 mask = 0x00000000;
32 u32 ctrl = 0x00000001;
33 int i;
34
35 for (i = 0; i < ARRAY_SIZE(ctr->signal) && ctr->signal[i]; i++)
36 mask |= 1 << (ctr->signal[i] - dom->signal);
37
38 nv_wr32(ppm, 0x10a504 + (ctr->slot * 0x10), mask);
39 nv_wr32(ppm, 0x10a50c + (ctr->slot * 0x10), ctrl);
40 nv_wr32(ppm, 0x10a50c + (ppm->last * 0x10), 0x00000003);
41}
42
43static void
44pwr_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
45 struct nouveau_perfctr *ctr)
46{
47 ctr->ctr = ppm->pwr[ctr->slot];
48 ctr->clk = ppm->pwr[ppm->last];
49}
50
51static void
52pwr_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
53{
54 int i;
55
56 for (i = 0; i <= ppm->last; i++) {
57 ppm->pwr[i] = nv_rd32(ppm, 0x10a508 + (i * 0x10));
58 nv_wr32(ppm, 0x10a508 + (i * 0x10), 0x80000000);
59 }
60}
61
62static const struct nouveau_funcdom
63pwr_perfctr_func = {
64 .init = pwr_perfctr_init,
65 .read = pwr_perfctr_read,
66 .next = pwr_perfctr_next,
67};
68
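/* idle signals sampled from PDAEMON for the various engine units; each
 * newer generation below adds its extra copy engines
 */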
69const struct nouveau_specdom
70nva3_perfmon_pwr[] = {
71 { 0x20, (const struct nouveau_specsig[]) {
72 { 0x00, "pwr_gr_idle" },
73 { 0x04, "pwr_bsp_idle" },
74 { 0x05, "pwr_vp_idle" },
75 { 0x06, "pwr_ppp_idle" },
76 { 0x13, "pwr_ce0_idle" },
77 {}
78 }, &pwr_perfctr_func },
79 {}
80};
81
82const struct nouveau_specdom
83nvc0_perfmon_pwr[] = {
84 { 0x20, (const struct nouveau_specsig[]) {
85 { 0x00, "pwr_gr_idle" },
86 { 0x04, "pwr_bsp_idle" },
87 { 0x05, "pwr_vp_idle" },
88 { 0x06, "pwr_ppp_idle" },
89 { 0x13, "pwr_ce0_idle" },
90 { 0x14, "pwr_ce1_idle" },
91 {}
92 }, &pwr_perfctr_func },
93 {}
94};
95
96const struct nouveau_specdom
97nve0_perfmon_pwr[] = {
98 { 0x20, (const struct nouveau_specsig[]) {
99 { 0x00, "pwr_gr_idle" },
100 { 0x04, "pwr_bsp_idle" },
101 { 0x05, "pwr_vp_idle" },
102 { 0x06, "pwr_ppp_idle" },
103 { 0x13, "pwr_ce0_idle" },
104 { 0x14, "pwr_ce1_idle" },
105 { 0x15, "pwr_ce2_idle" },
106 {}
107 }, &pwr_perfctr_func },
108 {}
109};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c
new file mode 100644
index 000000000000..b2a10785adb1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c
@@ -0,0 +1,143 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv40.h"
26
27/*******************************************************************************
28 * Perfmon object classes
29 ******************************************************************************/
30
31/*******************************************************************************
32 * PPM context
33 ******************************************************************************/
34
35/*******************************************************************************
36 * PPM engine/subdev functions
37 ******************************************************************************/
38
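/* Arm one counter slot: "src" holds one source-signal index per byte,
 * "log" the logic op that combines them; the per-slot registers are
 * spaced 0x40 apart.
 */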
39static void
40nv40_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
41 struct nouveau_perfctr *ctr)
42{
43 struct nv40_perfmon_priv *priv = (void *)ppm;
44 struct nv40_perfmon_cntr *cntr = (void *)ctr;
45 u32 log = ctr->logic_op;
46 u32 src = 0x00000000;
47 int i;
48
49 for (i = 0; i < 4 && ctr->signal[i]; i++)
50 src |= (ctr->signal[i] - dom->signal) << (i * 8);
51
52 nv_wr32(priv, 0x00a7c0 + dom->addr, 0x00000001);
53 nv_wr32(priv, 0x00a400 + dom->addr + (cntr->base.slot * 0x40), src);
54 nv_wr32(priv, 0x00a420 + dom->addr + (cntr->base.slot * 0x40), log);
55}
56
57static void
58nv40_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
59 struct nouveau_perfctr *ctr)
60{
61 struct nv40_perfmon_priv *priv = (void *)ppm;
62 struct nv40_perfmon_cntr *cntr = (void *)ctr;
63
64 switch (cntr->base.slot) {
65 case 0: cntr->base.ctr = nv_rd32(priv, 0x00a700 + dom->addr); break;
66 case 1: cntr->base.ctr = nv_rd32(priv, 0x00a6c0 + dom->addr); break;
67 case 2: cntr->base.ctr = nv_rd32(priv, 0x00a680 + dom->addr); break;
68 case 3: cntr->base.ctr = nv_rd32(priv, 0x00a740 + dom->addr); break;
69 }
70 cntr->base.clk = nv_rd32(priv, 0x00a600 + dom->addr);
71}
72
73static void
74nv40_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
75{
76 struct nv40_perfmon_priv *priv = (void *)ppm;
77 if (priv->sequence != ppm->sequence) {
78 nv_wr32(priv, 0x400084, 0x00000020);
79 priv->sequence = ppm->sequence;
80 }
81}
82
83const struct nouveau_funcdom
84nv40_perfctr_func = {
85 .init = nv40_perfctr_init,
86 .read = nv40_perfctr_read,
87 .next = nv40_perfctr_next,
88};
89
90static const struct nouveau_specdom
91nv40_perfmon[] = {
92 { 0x20, (const struct nouveau_specsig[]) {
93 {}
94 }, &nv40_perfctr_func },
95 { 0x20, (const struct nouveau_specsig[]) {
96 {}
97 }, &nv40_perfctr_func },
98 { 0x20, (const struct nouveau_specsig[]) {
99 {}
100 }, &nv40_perfctr_func },
101 { 0x20, (const struct nouveau_specsig[]) {
102 {}
103 }, &nv40_perfctr_func },
104 { 0x20, (const struct nouveau_specsig[]) {
105 {}
106 }, &nv40_perfctr_func },
107 {}
108};
109
110int
111nv40_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
112 struct nouveau_oclass *oclass, void *data, u32 size,
113 struct nouveau_object **pobject)
114{
115 struct nv40_perfmon_oclass *mclass = (void *)oclass;
116 struct nv40_perfmon_priv *priv;
117 int ret;
118
119 ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
120 *pobject = nv_object(priv);
121 if (ret)
122 return ret;
123
124 ret = nouveau_perfdom_new(&priv->base, "pm", 0, 0, 0, 4, mclass->doms);
125 if (ret)
126 return ret;
127
128 nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
129 nv_engine(priv)->sclass = nouveau_perfmon_sclass;
130 return 0;
131}
132
133struct nouveau_oclass *
134nv40_perfmon_oclass = &(struct nv40_perfmon_oclass) {
135 .base.handle = NV_ENGINE(PERFMON, 0x40),
136 .base.ofuncs = &(struct nouveau_ofuncs) {
137 .ctor = nv40_perfmon_ctor,
138 .dtor = _nouveau_perfmon_dtor,
139 .init = _nouveau_perfmon_init,
140 .fini = _nouveau_perfmon_fini,
141 },
142 .doms = nv40_perfmon,
143}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h
new file mode 100644
index 000000000000..1b5792d1df14
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h
@@ -0,0 +1,26 @@
1#ifndef __NVKM_PM_NV40_H__
2#define __NVKM_PM_NV40_H__
3
4#include "priv.h"
5
6struct nv40_perfmon_oclass {
7 struct nouveau_oclass base;
8 const struct nouveau_specdom *doms;
9};
10
11struct nv40_perfmon_priv {
12 struct nouveau_perfmon base;
13 u32 sequence;
14};
15
16int nv40_perfmon_ctor(struct nouveau_object *, struct nouveau_object *,
17 struct nouveau_oclass *, void *data, u32 size,
18 struct nouveau_object **pobject);
19
20struct nv40_perfmon_cntr {
21 struct nouveau_perfctr base;
22};
23
24extern const struct nouveau_funcdom nv40_perfctr_func;
25
26#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c
new file mode 100644
index 000000000000..94217691fe67
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c
@@ -0,0 +1,70 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv40.h"
26
27/*******************************************************************************
28 * Perfmon object classes
29 ******************************************************************************/
30
31/*******************************************************************************
32 * PPM context
33 ******************************************************************************/
34
35/*******************************************************************************
36 * PPM engine/subdev functions
37 ******************************************************************************/
38
39static const struct nouveau_specdom
40nv50_perfmon[] = {
41 { 0x040, (const struct nouveau_specsig[]) {
42 {}
43 }, &nv40_perfctr_func },
44 { 0x100, (const struct nouveau_specsig[]) {
45 { 0xc8, "gr_idle" },
46 {}
47 }, &nv40_perfctr_func },
48 { 0x100, (const struct nouveau_specsig[]) {
49 {}
50 }, &nv40_perfctr_func },
51 { 0x020, (const struct nouveau_specsig[]) {
52 {}
53 }, &nv40_perfctr_func },
54 { 0x040, (const struct nouveau_specsig[]) {
55 {}
56 }, &nv40_perfctr_func },
57 {}
58};
59
60struct nouveau_oclass *
61nv50_perfmon_oclass = &(struct nv40_perfmon_oclass) {
62 .base.handle = NV_ENGINE(PERFMON, 0x50),
63 .base.ofuncs = &(struct nouveau_ofuncs) {
64 .ctor = nv40_perfmon_ctor,
65 .dtor = _nouveau_perfmon_dtor,
66 .init = _nouveau_perfmon_init,
67 .fini = _nouveau_perfmon_fini,
68 },
69 .doms = nv50_perfmon,
70}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c
new file mode 100644
index 000000000000..9232c7fc6253
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c
@@ -0,0 +1,78 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv40.h"
26
27/*******************************************************************************
28 * Perfmon object classes
29 ******************************************************************************/
30
31/*******************************************************************************
32 * PPM context
33 ******************************************************************************/
34
35/*******************************************************************************
36 * PPM engine/subdev functions
37 ******************************************************************************/
38
39static const struct nouveau_specdom
40nv84_perfmon[] = {
41 { 0x20, (const struct nouveau_specsig[]) {
42 {}
43 }, &nv40_perfctr_func },
44 { 0x20, (const struct nouveau_specsig[]) {
45 {}
46 }, &nv40_perfctr_func },
47 { 0x20, (const struct nouveau_specsig[]) {
48 {}
49 }, &nv40_perfctr_func },
50 { 0x20, (const struct nouveau_specsig[]) {
51 {}
52 }, &nv40_perfctr_func },
53 { 0x20, (const struct nouveau_specsig[]) {
54 {}
55 }, &nv40_perfctr_func },
56 { 0x20, (const struct nouveau_specsig[]) {
57 {}
58 }, &nv40_perfctr_func },
59 { 0x20, (const struct nouveau_specsig[]) {
60 {}
61 }, &nv40_perfctr_func },
62 { 0x20, (const struct nouveau_specsig[]) {
63 {}
64 }, &nv40_perfctr_func },
65 {}
66};
67
68struct nouveau_oclass *
69nv84_perfmon_oclass = &(struct nv40_perfmon_oclass) {
70 .base.handle = NV_ENGINE(PERFMON, 0x84),
71 .base.ofuncs = &(struct nouveau_ofuncs) {
72 .ctor = nv40_perfmon_ctor,
73 .dtor = _nouveau_perfmon_dtor,
74 .init = _nouveau_perfmon_init,
75 .fini = _nouveau_perfmon_fini,
76 },
77 .doms = nv84_perfmon,
78}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c
new file mode 100644
index 000000000000..6197ebdeb648
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv40.h"
26
27/*******************************************************************************
28 * Perfmon object classes
29 ******************************************************************************/
30
31/*******************************************************************************
32 * PPM context
33 ******************************************************************************/
34
35/*******************************************************************************
36 * PPM engine/subdev functions
37 ******************************************************************************/
38
39static const struct nouveau_specdom
40nva3_perfmon[] = {
41 { 0x20, (const struct nouveau_specsig[]) {
42 {}
43 }, &nv40_perfctr_func },
44 { 0x20, (const struct nouveau_specsig[]) {
45 {}
46 }, &nv40_perfctr_func },
47 { 0x20, (const struct nouveau_specsig[]) {
48 {}
49 }, &nv40_perfctr_func },
50 { 0x20, (const struct nouveau_specsig[]) {
51 {}
52 }, &nv40_perfctr_func },
53 { 0x20, (const struct nouveau_specsig[]) {
54 {}
55 }, &nv40_perfctr_func },
56 { 0x20, (const struct nouveau_specsig[]) {
57 {}
58 }, &nv40_perfctr_func },
59 { 0x20, (const struct nouveau_specsig[]) {
60 {}
61 }, &nv40_perfctr_func },
62 { 0x20, (const struct nouveau_specsig[]) {
63 {}
64 }, &nv40_perfctr_func },
65 {}
66};
67
68static int
69nva3_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
70 struct nouveau_oclass *oclass, void *data, u32 size,
71 struct nouveau_object **object)
72{
73 int ret = nv40_perfmon_ctor(parent, engine, oclass, data, size, object);
74 if (ret == 0) {
75 struct nv40_perfmon_priv *priv = (void *)*object;
76 ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
77 nva3_perfmon_pwr);
78 if (ret)
79 return ret;
80
81 priv->base.last = 3;
82 }
83 return ret;
84}
85
86struct nouveau_oclass *
87nva3_perfmon_oclass = &(struct nv40_perfmon_oclass) {
88 .base.handle = NV_ENGINE(PERFMON, 0xa3),
89 .base.ofuncs = &(struct nouveau_ofuncs) {
90 .ctor = nva3_perfmon_ctor,
91 .dtor = _nouveau_perfmon_dtor,
92 .init = _nouveau_perfmon_init,
93 .fini = _nouveau_perfmon_fini,
94 },
95 .doms = nva3_perfmon,
96}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c
new file mode 100644
index 000000000000..74b241042502
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c
@@ -0,0 +1,173 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26
27/*******************************************************************************
28 * Perfmon object classes
29 ******************************************************************************/
30
31/*******************************************************************************
32 * PPM context
33 ******************************************************************************/
34
35/*******************************************************************************
36 * PPM engine/subdev functions
37 ******************************************************************************/
38
39static const struct nouveau_specdom
40nvc0_perfmon_hub[] = {
41 {}
42};
43
44static const struct nouveau_specdom
45nvc0_perfmon_gpc[] = {
46 {}
47};
48
49static const struct nouveau_specdom
50nvc0_perfmon_part[] = {
51 {}
52};
53
54static void
55nvc0_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
56 struct nouveau_perfctr *ctr)
57{
58 struct nvc0_perfmon_priv *priv = (void *)ppm;
59 struct nvc0_perfmon_cntr *cntr = (void *)ctr;
60 u32 log = ctr->logic_op;
61 u32 src = 0x00000000;
62 int i;
63
64 for (i = 0; i < 4 && ctr->signal[i]; i++)
65 src |= (ctr->signal[i] - dom->signal) << (i * 8);
66
67 nv_wr32(priv, dom->addr + 0x09c, 0x00040002);
68 nv_wr32(priv, dom->addr + 0x100, 0x00000000);
69 nv_wr32(priv, dom->addr + 0x040 + (cntr->base.slot * 0x08), src);
70 nv_wr32(priv, dom->addr + 0x044 + (cntr->base.slot * 0x08), log);
71}
72
73static void
74nvc0_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
75 struct nouveau_perfctr *ctr)
76{
77 struct nvc0_perfmon_priv *priv = (void *)ppm;
78 struct nvc0_perfmon_cntr *cntr = (void *)ctr;
79
80 switch (cntr->base.slot) {
81 case 0: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x08c); break;
82 case 1: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x088); break;
83 case 2: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x080); break;
84 case 3: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x090); break;
85 }
86 cntr->base.clk = nv_rd32(priv, dom->addr + 0x070);
87}
88
89static void
90nvc0_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
91{
92 struct nvc0_perfmon_priv *priv = (void *)ppm;
93 nv_wr32(priv, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
94 nv_wr32(priv, dom->addr + 0x0ec, 0x00000011);
95}
96
97const struct nouveau_funcdom
98nvc0_perfctr_func = {
99 .init = nvc0_perfctr_init,
100 .read = nvc0_perfctr_read,
101 .next = nvc0_perfctr_next,
102};
103
104int
105nvc0_perfmon_fini(struct nouveau_object *object, bool suspend)
106{
107 struct nvc0_perfmon_priv *priv = (void *)object;
108 nv_mask(priv, 0x000200, 0x10000000, 0x00000000);
109 nv_mask(priv, 0x000200, 0x10000000, 0x10000000);
110 return nouveau_perfmon_fini(&priv->base, suspend);
111}
112
113static int
114nvc0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
115 struct nouveau_oclass *oclass, void *data, u32 size,
116 struct nouveau_object **pobject)
117{
118 struct nvc0_perfmon_priv *priv;
119 u32 mask;
120 int ret;
121
122 ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
123 *pobject = nv_object(priv);
124 if (ret)
125 return ret;
126
127 ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
128 nvc0_perfmon_pwr);
129 if (ret)
130 return ret;
131
132 /* HUB */
133 ret = nouveau_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
134 nvc0_perfmon_hub);
135 if (ret)
136 return ret;
137
138 /* GPC */
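	/* one mask bit per unit present (count read from 0x022430); bits in
	 * 0x022504/0x022584 appear to flag units that are absent/disabled
	 */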
139 mask = (1 << nv_rd32(priv, 0x022430)) - 1;
140 mask &= ~nv_rd32(priv, 0x022504);
141 mask &= ~nv_rd32(priv, 0x022584);
142
143 ret = nouveau_perfdom_new(&priv->base, "gpc", mask, 0x180000,
144 0x1000, 0x200, nvc0_perfmon_gpc);
145 if (ret)
146 return ret;
147
148 /* PART */
149 mask = (1 << nv_rd32(priv, 0x022438)) - 1;
150 mask &= ~nv_rd32(priv, 0x022548);
151 mask &= ~nv_rd32(priv, 0x0225c8);
152
153 ret = nouveau_perfdom_new(&priv->base, "part", mask, 0x1a0000,
154 0x1000, 0x200, nvc0_perfmon_part);
155 if (ret)
156 return ret;
157
158 nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
159 nv_engine(priv)->sclass = nouveau_perfmon_sclass;
160 priv->base.last = 7;
161 return 0;
162}
163
164struct nouveau_oclass
165nvc0_perfmon_oclass = {
166 .handle = NV_ENGINE(PERFMON, 0xc0),
167 .ofuncs = &(struct nouveau_ofuncs) {
168 .ctor = nvc0_perfmon_ctor,
169 .dtor = _nouveau_perfmon_dtor,
170 .init = _nouveau_perfmon_init,
171 .fini = nvc0_perfmon_fini,
172 },
173};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h
new file mode 100644
index 000000000000..f66bca484263
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h
@@ -0,0 +1,17 @@
1#ifndef __NVKM_PM_NVC0_H__
2#define __NVKM_PM_NVC0_H__
3
4#include "priv.h"
5
6struct nvc0_perfmon_priv {
7 struct nouveau_perfmon base;
8};
9
10struct nvc0_perfmon_cntr {
11 struct nouveau_perfctr base;
12};
13
14extern const struct nouveau_funcdom nvc0_perfctr_func;
15int nvc0_perfmon_fini(struct nouveau_object *, bool);
16
17#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c
new file mode 100644
index 000000000000..71d718c12075
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26
27/*******************************************************************************
28 * Perfmon object classes
29 ******************************************************************************/
30
31/*******************************************************************************
32 * PPM context
33 ******************************************************************************/
34
35/*******************************************************************************
36 * PPM engine/subdev functions
37 ******************************************************************************/
38
39static const struct nouveau_specdom
40nve0_perfmon_hub[] = {
41 { 0x60, (const struct nouveau_specsig[]) {
42 { 0x47, "hub00_user_0" },
43 {}
44 }, &nvc0_perfctr_func },
45 { 0x40, (const struct nouveau_specsig[]) {
46 { 0x27, "hub01_user_0" },
47 {}
48 }, &nvc0_perfctr_func },
49 { 0x60, (const struct nouveau_specsig[]) {
50 { 0x47, "hub02_user_0" },
51 {}
52 }, &nvc0_perfctr_func },
53 { 0x60, (const struct nouveau_specsig[]) {
54 { 0x47, "hub03_user_0" },
55 {}
56 }, &nvc0_perfctr_func },
57 { 0x40, (const struct nouveau_specsig[]) {
58 { 0x03, "host_mmio_rd" },
59 { 0x27, "hub04_user_0" },
60 {}
61 }, &nvc0_perfctr_func },
62 { 0x60, (const struct nouveau_specsig[]) {
63 { 0x47, "hub05_user_0" },
64 {}
65 }, &nvc0_perfctr_func },
66 { 0xc0, (const struct nouveau_specsig[]) {
67 { 0x74, "host_fb_rd3x" },
68 { 0x75, "host_fb_rd3x_2" },
69 { 0xa7, "hub06_user_0" },
70 {}
71 }, &nvc0_perfctr_func },
72 { 0x60, (const struct nouveau_specsig[]) {
73 { 0x47, "hub07_user_0" },
74 {}
75 }, &nvc0_perfctr_func },
76 {}
77};
78
79static const struct nouveau_specdom
80nve0_perfmon_gpc[] = {
81 { 0xe0, (const struct nouveau_specsig[]) {
82 { 0xc7, "gpc00_user_0" },
83 {}
84 }, &nvc0_perfctr_func },
85 {}
86};
87
88static const struct nouveau_specdom
89nve0_perfmon_part[] = {
90 { 0x60, (const struct nouveau_specsig[]) {
91 { 0x47, "part00_user_0" },
92 {}
93 }, &nvc0_perfctr_func },
94 { 0x60, (const struct nouveau_specsig[]) {
95 { 0x47, "part01_user_0" },
96 {}
97 }, &nvc0_perfctr_func },
98 {}
99};
100
101static int
102nve0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
103 struct nouveau_oclass *oclass, void *data, u32 size,
104 struct nouveau_object **pobject)
105{
106 struct nvc0_perfmon_priv *priv;
107 u32 mask;
108 int ret;
109
110 ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
111 *pobject = nv_object(priv);
112 if (ret)
113 return ret;
114
115 /* PDAEMON */
116 ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
117 nve0_perfmon_pwr);
118 if (ret)
119 return ret;
120
121 /* HUB */
122 ret = nouveau_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
123 nve0_perfmon_hub);
124 if (ret)
125 return ret;
126
127 /* GPC */
128 mask = (1 << nv_rd32(priv, 0x022430)) - 1;
129 mask &= ~nv_rd32(priv, 0x022504);
130 mask &= ~nv_rd32(priv, 0x022584);
131
132 ret = nouveau_perfdom_new(&priv->base, "gpc", mask, 0x180000,
133 0x1000, 0x200, nve0_perfmon_gpc);
134 if (ret)
135 return ret;
136
137 /* PART */
138 mask = (1 << nv_rd32(priv, 0x022438)) - 1;
139 mask &= ~nv_rd32(priv, 0x022548);
140 mask &= ~nv_rd32(priv, 0x0225c8);
141
142 ret = nouveau_perfdom_new(&priv->base, "part", mask, 0x1a0000,
143 0x1000, 0x200, nve0_perfmon_part);
144 if (ret)
145 return ret;
146
147 nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
148 nv_engine(priv)->sclass = nouveau_perfmon_sclass;
149 priv->base.last = 7;
150 return 0;
151}
152
153struct nouveau_oclass
154nve0_perfmon_oclass = {
155 .handle = NV_ENGINE(PERFMON, 0xe0),
156 .ofuncs = &(struct nouveau_ofuncs) {
157 .ctor = nve0_perfmon_ctor,
158 .dtor = _nouveau_perfmon_dtor,
159 .init = _nouveau_perfmon_init,
160 .fini = nvc0_perfmon_fini,
161 },
162};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c
new file mode 100644
index 000000000000..47256f78a895
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26
27/*******************************************************************************
28 * Perfmon object classes
29 ******************************************************************************/
30
31/*******************************************************************************
32 * PPM context
33 ******************************************************************************/
34
35/*******************************************************************************
36 * PPM engine/subdev functions
37 ******************************************************************************/
38
39static int
40nvf0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
41 struct nouveau_oclass *oclass, void *data, u32 size,
42 struct nouveau_object **pobject)
43{
44 struct nvc0_perfmon_priv *priv;
45 int ret;
46
47 ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
48 *pobject = nv_object(priv);
49 if (ret)
50 return ret;
51
52 ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
53 nve0_perfmon_pwr);
54 if (ret)
55 return ret;
56
57 nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
58 nv_engine(priv)->sclass = nouveau_perfmon_sclass;
59 return 0;
60}
61
62struct nouveau_oclass
63nvf0_perfmon_oclass = {
64 .handle = NV_ENGINE(PERFMON, 0xf0),
65 .ofuncs = &(struct nouveau_ofuncs) {
66 .ctor = nvf0_perfmon_ctor,
67 .dtor = _nouveau_perfmon_dtor,
68 .init = _nouveau_perfmon_init,
69 .fini = nvc0_perfmon_fini,
70 },
71};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h
new file mode 100644
index 000000000000..0ac8714fe0ba
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h
@@ -0,0 +1,91 @@
1#ifndef __NVKM_PERFMON_PRIV_H__
2#define __NVKM_PERFMON_PRIV_H__
3
4#include <engine/perfmon.h>
5
6struct nouveau_perfctr {
7 struct nouveau_object base;
8 struct list_head head;
9 struct nouveau_perfsig *signal[4];
10 int slot;
11 u32 logic_op;
12 u32 clk;
13 u32 ctr;
14};
15
16extern struct nouveau_oclass nouveau_perfmon_sclass[];
17
18struct nouveau_perfctx {
19 struct nouveau_engctx base;
20};
21
22extern struct nouveau_oclass nouveau_perfmon_cclass;
23
24struct nouveau_specsig {
25 u8 signal;
26 const char *name;
27};
28
29struct nouveau_perfsig {
30 const char *name;
31};
32
33struct nouveau_perfdom;
34struct nouveau_perfctr *
35nouveau_perfsig_wrap(struct nouveau_perfmon *, const char *,
36 struct nouveau_perfdom **);
37
38struct nouveau_specdom {
39 u16 signal_nr;
40 const struct nouveau_specsig *signal;
41 const struct nouveau_funcdom *func;
42};
43
44extern const struct nouveau_specdom nva3_perfmon_pwr[];
45extern const struct nouveau_specdom nvc0_perfmon_pwr[];
46extern const struct nouveau_specdom nve0_perfmon_pwr[];
47
48struct nouveau_perfdom {
49 struct list_head head;
50 struct list_head list;
51 const struct nouveau_funcdom *func;
52 char name[32];
53 u32 addr;
54 u8 quad;
55 u32 signal_nr;
56 struct nouveau_perfsig signal[];
57};
58
59struct nouveau_funcdom {
60 void (*init)(struct nouveau_perfmon *, struct nouveau_perfdom *,
61 struct nouveau_perfctr *);
62 void (*read)(struct nouveau_perfmon *, struct nouveau_perfdom *,
63 struct nouveau_perfctr *);
64 void (*next)(struct nouveau_perfmon *, struct nouveau_perfdom *);
65};
66
67int nouveau_perfdom_new(struct nouveau_perfmon *, const char *, u32,
68 u32, u32, u32, const struct nouveau_specdom *);
69
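/* convenience wrappers: size the derived priv structure and forward to
 * the shared implementations declared below
 */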
70#define nouveau_perfmon_create(p,e,o,d) \
71 nouveau_perfmon_create_((p), (e), (o), sizeof(**d), (void **)d)
72#define nouveau_perfmon_dtor(p) ({ \
73 struct nouveau_perfmon *c = (p); \
74 _nouveau_perfmon_dtor(nv_object(c)); \
75})
76#define nouveau_perfmon_init(p) ({ \
77 struct nouveau_perfmon *c = (p); \
78 _nouveau_perfmon_init(nv_object(c)); \
79})
80#define nouveau_perfmon_fini(p,s) ({ \
81 struct nouveau_perfmon *c = (p); \
82 _nouveau_perfmon_fini(nv_object(c), (s)); \
83})
84
85int nouveau_perfmon_create_(struct nouveau_object *, struct nouveau_object *,
86 struct nouveau_oclass *, int, void **);
87void _nouveau_perfmon_dtor(struct nouveau_object *);
88int _nouveau_perfmon_init(struct nouveau_object *);
89int _nouveau_perfmon_fini(struct nouveau_object *, bool);
90
91#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index 2a859a31c30d..c571758e4a27 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -135,8 +135,8 @@ nv04_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	return 0;
 }
 
-struct nouveau_oclass
-nv04_software_oclass = {
+struct nouveau_oclass *
+nv04_software_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(SW, 0x04),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_software_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index a019364b1e13..a62f11a78430 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -117,8 +117,8 @@ nv10_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	return 0;
 }
 
-struct nouveau_oclass
-nv10_software_oclass = {
+struct nouveau_oclass *
+nv10_software_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(SW, 0x10),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv10_software_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index c48e74953771..b574dd4bb828 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -32,16 +32,9 @@
 
 #include <subdev/bar.h>
 
-#include <engine/software.h>
 #include <engine/disp.h>
 
-struct nv50_software_priv {
-	struct nouveau_software base;
-};
-
-struct nv50_software_chan {
-	struct nouveau_software_chan base;
-};
+#include "nv50.h"
 
 /*******************************************************************************
  * software object classes
@@ -62,7 +55,7 @@ nv50_software_mthd_dma_vblsem(struct nouveau_object *object, u32 mthd,
 
 	if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
 		struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object);
-		chan->base.vblank.ctxdma = gpuobj->node->offset >> 4;
+		chan->vblank.ctxdma = gpuobj->node->offset >> 4;
 		ret = 0;
 	}
 	nouveau_namedb_put(handle);
@@ -74,34 +67,33 @@ nv50_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
 				 void *args, u32 size)
 {
 	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
-	chan->base.vblank.offset = *(u32 *)args;
+	chan->vblank.offset = *(u32 *)args;
 	return 0;
 }
 
-static int
+int
 nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
 				void *args, u32 size)
 {
 	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
-	chan->base.vblank.value = *(u32 *)args;
+	chan->vblank.value = *(u32 *)args;
 	return 0;
 }
 
-static int
+int
 nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
 				  void *args, u32 size)
 {
 	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
-	struct nouveau_disp *disp = nouveau_disp(object);
-	u32 crtc = *(u32 *)args;
-	if (crtc > 1)
+	u32 head = *(u32 *)args;
+	if (head >= chan->vblank.nr_event)
 		return -EINVAL;
 
-	nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
+	nouveau_event_get(chan->vblank.event[head]);
 	return 0;
 }
 
-static int
+int
 nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
 			void *args, u32 size)
 {
@@ -132,10 +124,9 @@ nv50_software_sclass[] = {
  ******************************************************************************/
 
 static int
-nv50_software_vblsem_release(struct nouveau_eventh *event, int head)
+nv50_software_vblsem_release(void *data, int head)
 {
-	struct nouveau_software_chan *chan =
-		container_of(event, struct nouveau_software_chan, vblank.event);
+	struct nv50_software_chan *chan = data;
 	struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
 	struct nouveau_bar *bar = nouveau_bar(priv);
 
@@ -154,45 +145,76 @@ nv50_software_vblsem_release(struct nouveau_eventh *event, int head)
 	return NVKM_EVENT_DROP;
 }
 
-static int
+void
+nv50_software_context_dtor(struct nouveau_object *object)
+{
+	struct nv50_software_chan *chan = (void *)object;
+	int i;
+
+	if (chan->vblank.event) {
+		for (i = 0; i < chan->vblank.nr_event; i++)
+			nouveau_event_ref(NULL, &chan->vblank.event[i]);
+		kfree(chan->vblank.event);
+	}
+
+	nouveau_software_context_destroy(&chan->base);
+}
+
+int
 nv50_software_context_ctor(struct nouveau_object *parent,
 			   struct nouveau_object *engine,
 			   struct nouveau_oclass *oclass, void *data, u32 size,
 			   struct nouveau_object **pobject)
 {
+	struct nouveau_disp *pdisp = nouveau_disp(parent);
+	struct nv50_software_cclass *pclass = (void *)oclass;
 	struct nv50_software_chan *chan;
-	int ret;
+	int ret, i;
 
 	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
 
-	chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
-	chan->base.vblank.event.func = nv50_software_vblsem_release;
+	chan->vblank.nr_event = pdisp->vblank->index_nr;
+	chan->vblank.event = kzalloc(chan->vblank.nr_event *
+				     sizeof(*chan->vblank.event), GFP_KERNEL);
+	if (!chan->vblank.event)
+		return -ENOMEM;
+
+	for (i = 0; i < chan->vblank.nr_event; i++) {
+		ret = nouveau_event_new(pdisp->vblank, i, pclass->vblank,
+					chan, &chan->vblank.event[i]);
+		if (ret)
+			return ret;
+	}
+
+	chan->vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
 	return 0;
 }
 
-static struct nouveau_oclass
+static struct nv50_software_cclass
 nv50_software_cclass = {
-	.handle = NV_ENGCTX(SW, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
+	.base.handle = NV_ENGCTX(SW, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_software_context_ctor,
 		.dtor = _nouveau_software_context_dtor,
 		.init = _nouveau_software_context_init,
 		.fini = _nouveau_software_context_fini,
 	},
+	.vblank = nv50_software_vblsem_release,
 };
 
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
-static int
+int
 nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		   struct nouveau_oclass *oclass, void *data, u32 size,
 		   struct nouveau_object **pobject)
 {
+	struct nv50_software_oclass *pclass = (void *)oclass;
 	struct nv50_software_priv *priv;
 	int ret;
 
@@ -201,19 +223,21 @@ nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
201 if (ret) 223 if (ret)
202 return ret; 224 return ret;
203 225
204 nv_engine(priv)->cclass = &nv50_software_cclass; 226 nv_engine(priv)->cclass = pclass->cclass;
205 nv_engine(priv)->sclass = nv50_software_sclass; 227 nv_engine(priv)->sclass = pclass->sclass;
206 nv_subdev(priv)->intr = nv04_software_intr; 228 nv_subdev(priv)->intr = nv04_software_intr;
207 return 0; 229 return 0;
208} 230}
209 231
210struct nouveau_oclass 232struct nouveau_oclass *
211nv50_software_oclass = { 233nv50_software_oclass = &(struct nv50_software_oclass) {
212 .handle = NV_ENGINE(SW, 0x50), 234 .base.handle = NV_ENGINE(SW, 0x50),
213 .ofuncs = &(struct nouveau_ofuncs) { 235 .base.ofuncs = &(struct nouveau_ofuncs) {
214 .ctor = nv50_software_ctor, 236 .ctor = nv50_software_ctor,
215 .dtor = _nouveau_software_dtor, 237 .dtor = _nouveau_software_dtor,
216 .init = _nouveau_software_init, 238 .init = _nouveau_software_init,
217 .fini = _nouveau_software_fini, 239 .fini = _nouveau_software_fini,
218 }, 240 },
219}; 241 .cclass = &nv50_software_cclass.base,
242 .sclass = nv50_software_sclass,
243}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
new file mode 100644
index 000000000000..2de370c21279
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
@@ -0,0 +1,47 @@
1#ifndef __NVKM_SW_NV50_H__
2#define __NVKM_SW_NV50_H__
3
4#include <engine/software.h>
5
6struct nv50_software_oclass {
7 struct nouveau_oclass base;
8 struct nouveau_oclass *cclass;
9 struct nouveau_oclass *sclass;
10};
11
12struct nv50_software_priv {
13 struct nouveau_software base;
14};
15
16int nv50_software_ctor(struct nouveau_object *, struct nouveau_object *,
17 struct nouveau_oclass *, void *, u32,
18 struct nouveau_object **);
19
20struct nv50_software_cclass {
21 struct nouveau_oclass base;
22 int (*vblank)(void *, int);
23};
24
25struct nv50_software_chan {
26 struct nouveau_software_chan base;
27 struct {
28 struct nouveau_eventh **event;
29 int nr_event;
30 u32 channel;
31 u32 ctxdma;
32 u64 offset;
33 u32 value;
34 } vblank;
35};
36
37int nv50_software_context_ctor(struct nouveau_object *,
38 struct nouveau_object *,
39 struct nouveau_oclass *, void *, u32,
40 struct nouveau_object **);
41void nv50_software_context_dtor(struct nouveau_object *);
42
43int nv50_software_mthd_vblsem_value(struct nouveau_object *, u32, void *, u32);
44int nv50_software_mthd_vblsem_release(struct nouveau_object *, u32, void *, u32);
45int nv50_software_mthd_flip(struct nouveau_object *, u32, void *, u32);
46
47#endif
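
[Editor's note] The new nv50.h exists so nvc0.c (further down) can reuse the nv50 channel context wholesale; the per-head event array in nv50_software_chan replaces the single nouveau_eventh that used to be embedded in nouveau_software_chan. As a rough sketch of the intended reuse, a hypothetical later chipset would only supply its own vblank-release callback and wrap the shared constructor, much as the nvc0 conversion below does. The nvxx_ names and class id here are illustrative, not from the patch:

    #include <core/event.h>
    #include "nv50.h"

    static int
    nvxx_software_vblsem_release(void *data, int head)
    {
            struct nv50_software_chan *chan = data;
            /* chipset-specific semaphore-release register writes go here */
            return NVKM_EVENT_DROP;
    }

    static struct nv50_software_cclass
    nvxx_software_cclass = {
            .base.handle = NV_ENGCTX(SW, 0xf0),     /* placeholder id */
            .base.ofuncs = &(struct nouveau_ofuncs) {
                    .ctor = nv50_software_context_ctor,
                    .dtor = nv50_software_context_dtor, /* frees per-head events */
                    .init = _nouveau_software_context_init,
                    .fini = _nouveau_software_context_fini,
            },
            .vblank = nvxx_software_vblsem_release,
    };
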
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index d698e710ddd4..f9430c1bf3e5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -32,13 +32,7 @@
32#include <engine/software.h> 32#include <engine/software.h>
33#include <engine/disp.h> 33#include <engine/disp.h>
34 34
35struct nvc0_software_priv { 35#include "nv50.h"
36 struct nouveau_software base;
37};
38
39struct nvc0_software_chan {
40 struct nouveau_software_chan base;
41};
42 36
43/******************************************************************************* 37/*******************************************************************************
44 * software object classes 38 * software object classes
@@ -48,58 +42,24 @@ static int
48nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd, 42nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
49 void *args, u32 size) 43 void *args, u32 size)
50{ 44{
51 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent); 45 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
52 u64 data = *(u32 *)args; 46 u64 data = *(u32 *)args;
53 if (mthd == 0x0400) { 47 if (mthd == 0x0400) {
54 chan->base.vblank.offset &= 0x00ffffffffULL; 48 chan->vblank.offset &= 0x00ffffffffULL;
55 chan->base.vblank.offset |= data << 32; 49 chan->vblank.offset |= data << 32;
56 } else { 50 } else {
57 chan->base.vblank.offset &= 0xff00000000ULL; 51 chan->vblank.offset &= 0xff00000000ULL;
58 chan->base.vblank.offset |= data; 52 chan->vblank.offset |= data;
59 } 53 }
60 return 0; 54 return 0;
61} 55}
62 56
63static int 57static int
64nvc0_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
65 void *args, u32 size)
66{
67 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
68 chan->base.vblank.value = *(u32 *)args;
69 return 0;
70}
71
72static int
73nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
74 void *args, u32 size)
75{
76 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
77 struct nouveau_disp *disp = nouveau_disp(object);
78 u32 crtc = *(u32 *)args;
79
80 if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
81 return -EINVAL;
82
83 nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
84 return 0;
85}
86
87static int
88nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
89 void *args, u32 size)
90{
91 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
92 if (chan->base.flip)
93 return chan->base.flip(chan->base.flip_data);
94 return -EINVAL;
95}
96
97static int
98nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd, 58nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd,
99 void *args, u32 size) 59 void *args, u32 size)
100{ 60{
101 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent); 61 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
102 struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine; 62 struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
103 u32 data = *(u32 *)args; 63 u32 data = *(u32 *)args;
104 64
105 switch (mthd) { 65 switch (mthd) {
@@ -124,9 +84,9 @@ static struct nouveau_omthds
124nvc0_software_omthds[] = { 84nvc0_software_omthds[] = {
125 { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset }, 85 { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
126 { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset }, 86 { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
127 { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value }, 87 { 0x0408, 0x0408, nv50_software_mthd_vblsem_value },
128 { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release }, 88 { 0x040c, 0x040c, nv50_software_mthd_vblsem_release },
129 { 0x0500, 0x0500, nvc0_software_mthd_flip }, 89 { 0x0500, 0x0500, nv50_software_mthd_flip },
130 { 0x0600, 0x0600, nvc0_software_mthd_mp_control }, 90 { 0x0600, 0x0600, nvc0_software_mthd_mp_control },
131 { 0x0644, 0x0644, nvc0_software_mthd_mp_control }, 91 { 0x0644, 0x0644, nvc0_software_mthd_mp_control },
132 { 0x06ac, 0x06ac, nvc0_software_mthd_mp_control }, 92 { 0x06ac, 0x06ac, nvc0_software_mthd_mp_control },
@@ -144,11 +104,10 @@ nvc0_software_sclass[] = {
144 ******************************************************************************/ 104 ******************************************************************************/
145 105
146static int 106static int
147nvc0_software_vblsem_release(struct nouveau_eventh *event, int head) 107nvc0_software_vblsem_release(void *data, int head)
148{ 108{
149 struct nouveau_software_chan *chan = 109 struct nv50_software_chan *chan = data;
150 container_of(event, struct nouveau_software_chan, vblank.event); 110 struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
151 struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
152 struct nouveau_bar *bar = nouveau_bar(priv); 111 struct nouveau_bar *bar = nouveau_bar(priv);
153 112
154 nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel); 113 nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
@@ -160,66 +119,31 @@ nvc0_software_vblsem_release(struct nouveau_eventh *event, int head)
160 return NVKM_EVENT_DROP; 119 return NVKM_EVENT_DROP;
161} 120}
162 121
163static int 122static struct nv50_software_cclass
164nvc0_software_context_ctor(struct nouveau_object *parent,
165 struct nouveau_object *engine,
166 struct nouveau_oclass *oclass, void *data, u32 size,
167 struct nouveau_object **pobject)
168{
169 struct nvc0_software_chan *chan;
170 int ret;
171
172 ret = nouveau_software_context_create(parent, engine, oclass, &chan);
173 *pobject = nv_object(chan);
174 if (ret)
175 return ret;
176
177 chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
178 chan->base.vblank.event.func = nvc0_software_vblsem_release;
179 return 0;
180}
181
182static struct nouveau_oclass
183nvc0_software_cclass = { 123nvc0_software_cclass = {
184 .handle = NV_ENGCTX(SW, 0xc0), 124 .base.handle = NV_ENGCTX(SW, 0xc0),
185 .ofuncs = &(struct nouveau_ofuncs) { 125 .base.ofuncs = &(struct nouveau_ofuncs) {
186 .ctor = nvc0_software_context_ctor, 126 .ctor = nv50_software_context_ctor,
187 .dtor = _nouveau_software_context_dtor, 127 .dtor = _nouveau_software_context_dtor,
188 .init = _nouveau_software_context_init, 128 .init = _nouveau_software_context_init,
189 .fini = _nouveau_software_context_fini, 129 .fini = _nouveau_software_context_fini,
190 }, 130 },
131 .vblank = nvc0_software_vblsem_release,
191}; 132};
192 133
193/******************************************************************************* 134/*******************************************************************************
194 * software engine/subdev functions 135 * software engine/subdev functions
195 ******************************************************************************/ 136 ******************************************************************************/
196 137
197static int 138struct nouveau_oclass *
198nvc0_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 139nvc0_software_oclass = &(struct nv50_software_oclass) {
199 struct nouveau_oclass *oclass, void *data, u32 size, 140 .base.handle = NV_ENGINE(SW, 0xc0),
200 struct nouveau_object **pobject) 141 .base.ofuncs = &(struct nouveau_ofuncs) {
201{ 142 .ctor = nv50_software_ctor,
202 struct nvc0_software_priv *priv;
203 int ret;
204
205 ret = nouveau_software_create(parent, engine, oclass, &priv);
206 *pobject = nv_object(priv);
207 if (ret)
208 return ret;
209
210 nv_engine(priv)->cclass = &nvc0_software_cclass;
211 nv_engine(priv)->sclass = nvc0_software_sclass;
212 nv_subdev(priv)->intr = nv04_software_intr;
213 return 0;
214}
215
216struct nouveau_oclass
217nvc0_software_oclass = {
218 .handle = NV_ENGINE(SW, 0xc0),
219 .ofuncs = &(struct nouveau_ofuncs) {
220 .ctor = nvc0_software_ctor,
221 .dtor = _nouveau_software_dtor, 143 .dtor = _nouveau_software_dtor,
222 .init = _nouveau_software_init, 144 .init = _nouveau_software_init,
223 .fini = _nouveau_software_fini, 145 .fini = _nouveau_software_fini,
224 }, 146 },
225}; 147 .cclass = &nvc0_software_cclass.base,
148 .sclass = nvc0_software_sclass,
149}.base;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 5a5961b6a6a3..560c3593dae7 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -22,7 +22,7 @@
22#define NV_DEVICE_DISABLE_PPP 0x0000004000000000ULL 22#define NV_DEVICE_DISABLE_PPP 0x0000004000000000ULL
23#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL 23#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL
24#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL 24#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL
25#define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL 25#define NV_DEVICE_DISABLE_VIC 0x0000020000000000ULL
26#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL 26#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL
27 27
28struct nv_device_class { 28struct nv_device_class {
@@ -98,6 +98,77 @@ struct nv_dma_class {
98 u32 conf0; 98 u32 conf0;
99}; 99};
100 100
101/* Perfmon counter class
102 *
103 * XXXX: NV_PERFCTR
104 */
105#define NV_PERFCTR_CLASS 0x0000ffff
106#define NV_PERFCTR_QUERY 0x00000000
107#define NV_PERFCTR_SAMPLE 0x00000001
108#define NV_PERFCTR_READ 0x00000002
109
110struct nv_perfctr_class {
111 u16 logic_op;
112 struct {
113 char __user *name; /*XXX: use cfu when exposed to userspace */
114 u32 size;
115 } signal[4];
116};
117
118struct nv_perfctr_query {
119 u32 iter;
120 u32 size;
121 char __user *name; /*XXX: use ctu when exposed to userspace */
122};
123
124struct nv_perfctr_sample {
125};
126
127struct nv_perfctr_read {
128 u32 ctr;
129 u32 clk;
130};
131
132/* Device control class
133 *
134 * XXXX: NV_CONTROL
135 */
136#define NV_CONTROL_CLASS 0x0000fffe
137
138#define NV_CONTROL_PSTATE_INFO 0x00000000
139#define NV_CONTROL_PSTATE_INFO_USTATE_DISABLE (-1)
140#define NV_CONTROL_PSTATE_INFO_USTATE_PERFMON (-2)
141#define NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN (-1)
142#define NV_CONTROL_PSTATE_INFO_PSTATE_PERFMON (-2)
143#define NV_CONTROL_PSTATE_ATTR 0x00000001
144#define NV_CONTROL_PSTATE_ATTR_STATE_CURRENT (-1)
145#define NV_CONTROL_PSTATE_USER 0x00000002
146#define NV_CONTROL_PSTATE_USER_STATE_UNKNOWN (-1)
147#define NV_CONTROL_PSTATE_USER_STATE_PERFMON (-2)
148
149struct nv_control_pstate_info {
150 u32 count; /* out: number of power states */
151 s32 ustate; /* out: current target pstate index */
152 u32 pstate; /* out: current pstate index */
153};
154
155struct nv_control_pstate_attr {
156 s32 state; /* in: index of pstate to query
157 * out: pstate identifier
158 */
159 u32 index; /* in: index of attribute to query
160 * out: index of next attribute, or 0 if no more
161 */
162 char name[32];
163 char unit[16];
164 u32 min;
165 u32 max;
166};
167
168struct nv_control_pstate_user {
169 s32 state; /* in: pstate identifier */
170};
171
101/* DMA FIFO channel classes 172/* DMA FIFO channel classes
102 * 173 *
103 * 006b: NV03_CHANNEL_DMA 174 * 006b: NV03_CHANNEL_DMA
diff --git a/drivers/gpu/drm/nouveau/core/include/core/debug.h b/drivers/gpu/drm/nouveau/core/include/core/debug.h
index 9ea18dfcb4d0..8092e2e90323 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/debug.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/debug.h
@@ -1,13 +1,20 @@
1#ifndef __NOUVEAU_DEBUG_H__ 1#ifndef __NOUVEAU_DEBUG_H__
2#define __NOUVEAU_DEBUG_H__ 2#define __NOUVEAU_DEBUG_H__
3 3
4extern int nv_info_debug_level;
5
4#define NV_DBG_FATAL 0 6#define NV_DBG_FATAL 0
5#define NV_DBG_ERROR 1 7#define NV_DBG_ERROR 1
6#define NV_DBG_WARN 2 8#define NV_DBG_WARN 2
7#define NV_DBG_INFO 3 9#define NV_DBG_INFO nv_info_debug_level
8#define NV_DBG_DEBUG 4 10#define NV_DBG_DEBUG 4
9#define NV_DBG_TRACE 5 11#define NV_DBG_TRACE 5
10#define NV_DBG_PARANOIA 6 12#define NV_DBG_PARANOIA 6
11#define NV_DBG_SPAM 7 13#define NV_DBG_SPAM 7
12 14
15#define NV_DBG_INFO_NORMAL 3
16#define NV_DBG_INFO_SILENT NV_DBG_DEBUG
17
18#define nv_debug_level(a) nv_info_debug_level = NV_DBG_INFO_##a
19
13#endif 20#endif
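
[Editor's note] With this change NV_DBG_INFO is no longer the constant 3 but the runtime variable nv_info_debug_level, so info-level output can be demoted globally; this is what replaces the NV_PRINTK_SUSPEND machinery deleted from printk.h below. A minimal sketch of the intended use, assuming the default CONFIG_NOUVEAU_DEBUG level of 3 (info):

    /* demote nv_info() to debug level, silencing it at the default
     * build verbosity; illustrative usage around a noisy code path */
    nv_debug_level(SILENT);         /* nv_info_debug_level = NV_DBG_DEBUG */

    /* ... suspend/resume work that would otherwise flood the log ... */

    nv_debug_level(NORMAL);         /* nv_info_debug_level = 3 again */
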
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index 99b6600fe80a..ac2881d1776a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -33,9 +33,10 @@ enum nv_subdev_type {
33 NVDEV_SUBDEV_INSTMEM, 33 NVDEV_SUBDEV_INSTMEM,
34 NVDEV_SUBDEV_VM, 34 NVDEV_SUBDEV_VM,
35 NVDEV_SUBDEV_BAR, 35 NVDEV_SUBDEV_BAR,
36 NVDEV_SUBDEV_PWR,
36 NVDEV_SUBDEV_VOLT, 37 NVDEV_SUBDEV_VOLT,
37 NVDEV_SUBDEV_CLOCK,
38 NVDEV_SUBDEV_THERM, 38 NVDEV_SUBDEV_THERM,
39 NVDEV_SUBDEV_CLOCK,
39 40
40 NVDEV_ENGINE_DMAOBJ, 41 NVDEV_ENGINE_DMAOBJ,
41 NVDEV_ENGINE_FIFO, 42 NVDEV_ENGINE_FIFO,
@@ -50,9 +51,10 @@ enum nv_subdev_type {
50 NVDEV_ENGINE_COPY0, 51 NVDEV_ENGINE_COPY0,
51 NVDEV_ENGINE_COPY1, 52 NVDEV_ENGINE_COPY1,
52 NVDEV_ENGINE_COPY2, 53 NVDEV_ENGINE_COPY2,
53 NVDEV_ENGINE_UNK1C1, 54 NVDEV_ENGINE_VIC,
54 NVDEV_ENGINE_VENC, 55 NVDEV_ENGINE_VENC,
55 NVDEV_ENGINE_DISP, 56 NVDEV_ENGINE_DISP,
57 NVDEV_ENGINE_PERFMON,
56 58
57 NVDEV_SUBDEV_NR, 59 NVDEV_SUBDEV_NR,
58}; 60};
@@ -72,6 +74,7 @@ struct nouveau_device {
72 enum { 74 enum {
73 NV_04 = 0x04, 75 NV_04 = 0x04,
74 NV_10 = 0x10, 76 NV_10 = 0x10,
77 NV_11 = 0x11,
75 NV_20 = 0x20, 78 NV_20 = 0x20,
76 NV_30 = 0x30, 79 NV_30 = 0x30,
77 NV_40 = 0x40, 80 NV_40 = 0x40,
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
index 9e094408f14e..5d539ebff3ed 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/event.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -5,13 +5,21 @@
5#define NVKM_EVENT_DROP 0 5#define NVKM_EVENT_DROP 0
6#define NVKM_EVENT_KEEP 1 6#define NVKM_EVENT_KEEP 1
7 7
8/* nouveau_eventh.flags bit #s */
9#define NVKM_EVENT_ENABLE 0
10
8struct nouveau_eventh { 11struct nouveau_eventh {
12 struct nouveau_event *event;
9 struct list_head head; 13 struct list_head head;
10 int (*func)(struct nouveau_eventh *, int index); 14 unsigned long flags;
15 int index;
16 int (*func)(void *, int);
17 void *priv;
11}; 18};
12 19
13struct nouveau_event { 20struct nouveau_event {
14 spinlock_t lock; 21 spinlock_t list_lock;
22 spinlock_t refs_lock;
15 23
16 void *priv; 24 void *priv;
17 void (*enable)(struct nouveau_event *, int index); 25 void (*enable)(struct nouveau_event *, int index);
@@ -28,9 +36,11 @@ int nouveau_event_create(int index_nr, struct nouveau_event **);
28void nouveau_event_destroy(struct nouveau_event **); 36void nouveau_event_destroy(struct nouveau_event **);
29void nouveau_event_trigger(struct nouveau_event *, int index); 37void nouveau_event_trigger(struct nouveau_event *, int index);
30 38
31void nouveau_event_get(struct nouveau_event *, int index, 39int nouveau_event_new(struct nouveau_event *, int index,
32 struct nouveau_eventh *); 40 int (*func)(void *, int), void *,
33void nouveau_event_put(struct nouveau_event *, int index, 41 struct nouveau_eventh **);
34 struct nouveau_eventh *); 42void nouveau_event_ref(struct nouveau_eventh *, struct nouveau_eventh **);
43void nouveau_event_get(struct nouveau_eventh *);
44void nouveau_event_put(struct nouveau_eventh *);
35 45
36#endif 46#endif
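
[Editor's note] The event API switches from handlers embedded in their owner (and passed to get/put on every call) to refcounted handlers created once: nouveau_event_new() binds a callback plus private pointer to one event index, get()/put() now merely enable and disable delivery, and nouveau_event_ref(NULL, &p) drops the reference. A sketch of the new lifecycle against these declarations; pdisp, chan and the callback are illustrative stand-ins:

    static int
    my_vblank_handler(void *data, int head)
    {
            /* 'data' is the pointer registered with nouveau_event_new() */
            return NVKM_EVENT_KEEP;   /* keep delivery enabled; DROP stops it */
    }

    struct nouveau_eventh *handler = NULL;
    int ret;

    ret = nouveau_event_new(pdisp->vblank, 0, my_vblank_handler,
                            chan, &handler);
    if (ret == 0) {
            nouveau_event_get(handler);         /* enable delivery */
            /* ... my_vblank_handler() runs on each head-0 trigger ... */
            nouveau_event_put(handler);         /* disable delivery */
            nouveau_event_ref(NULL, &handler);  /* drop ref, handler freed */
    }
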
diff --git a/drivers/gpu/drm/nouveau/core/include/core/option.h b/drivers/gpu/drm/nouveau/core/include/core/option.h
index 27074957fd21..ed055847887e 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/option.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/option.h
@@ -8,4 +8,13 @@ bool nouveau_boolopt(const char *optstr, const char *opt, bool value);
8 8
9int nouveau_dbgopt(const char *optstr, const char *sub); 9int nouveau_dbgopt(const char *optstr, const char *sub);
10 10
11/* compares unterminated string 'str' with zero-terminated string 'cmp' */
12static inline int
13strncasecmpz(const char *str, const char *cmp, size_t len)
14{
15 if (strlen(cmp) != len)
16 return len;
17 return strncasecmp(str, cmp, len);
18}
19
11#endif 20#endif
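
[Editor's note] strncasecmpz() is for matching tokens that point into a larger option string and therefore carry a length instead of a NUL terminator; a non-zero return (len, on length mismatch) follows the strncasecmp() convention. A small usage sketch, assuming a "SUBSYS=level" style token of the kind the debug-option parser presumably tokenizes:

    const char *opt = "CLOCK=trace";            /* illustrative input */
    size_t toklen = strcspn(opt, "=");          /* 5, length of "CLOCK" */

    if (!strncasecmpz(opt, "clock", toklen)) {
            /* matched the unterminated token, case-insensitively */
    }
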
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index d87836e3a704..0f9a37bd32b0 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -6,27 +6,12 @@
6 6
7struct nouveau_object; 7struct nouveau_object;
8 8
9#define NV_PRINTK_FATAL KERN_CRIT 9void __printf(3, 4)
10#define NV_PRINTK_ERROR KERN_ERR 10nv_printk_(struct nouveau_object *, int, const char *, ...);
11#define NV_PRINTK_WARN KERN_WARNING
12#define NV_PRINTK_INFO KERN_INFO
13#define NV_PRINTK_DEBUG KERN_DEBUG
14#define NV_PRINTK_PARANOIA KERN_DEBUG
15#define NV_PRINTK_TRACE KERN_DEBUG
16#define NV_PRINTK_SPAM KERN_DEBUG
17
18extern int nv_printk_suspend_level;
19
20#define NV_DBG_SUSPEND (nv_printk_suspend_level)
21#define NV_PRINTK_SUSPEND (nv_printk_level_to_pfx(nv_printk_suspend_level))
22
23const char *nv_printk_level_to_pfx(int level);
24void __printf(4, 5)
25nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
26 11
27#define nv_printk(o,l,f,a...) do { \ 12#define nv_printk(o,l,f,a...) do { \
28 if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \ 13 if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \
29 nv_printk_(nv_object(o), NV_PRINTK_##l, NV_DBG_##l, f, ##a); \ 14 nv_printk_(nv_object(o), NV_DBG_##l, f, ##a); \
30} while(0) 15} while(0)
31 16
32#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a) 17#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a)
@@ -37,16 +22,9 @@ nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
37#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a) 22#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
38#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a) 23#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
39 24
40#define nv_suspend(o,f,a...) nv_printk((o), SUSPEND, f, ##a)
41
42static inline void nv_suspend_set_printk_level(int level)
43{
44 nv_printk_suspend_level = level;
45}
46
47#define nv_assert(f,a...) do { \ 25#define nv_assert(f,a...) do { \
48 if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \ 26 if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
49 nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a); \ 27 nv_printk_(NULL, NV_DBG_FATAL, f "\n", ##a); \
50 BUG_ON(1); \ 28 BUG_ON(1); \
51} while(0) 29} while(0)
52 30
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index 633c2f806482..8c32cf4d83c7 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -101,14 +101,14 @@ nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid);
101#define _nouveau_fifo_init _nouveau_engine_init 101#define _nouveau_fifo_init _nouveau_engine_init
102#define _nouveau_fifo_fini _nouveau_engine_fini 102#define _nouveau_fifo_fini _nouveau_engine_fini
103 103
104extern struct nouveau_oclass nv04_fifo_oclass; 104extern struct nouveau_oclass *nv04_fifo_oclass;
105extern struct nouveau_oclass nv10_fifo_oclass; 105extern struct nouveau_oclass *nv10_fifo_oclass;
106extern struct nouveau_oclass nv17_fifo_oclass; 106extern struct nouveau_oclass *nv17_fifo_oclass;
107extern struct nouveau_oclass nv40_fifo_oclass; 107extern struct nouveau_oclass *nv40_fifo_oclass;
108extern struct nouveau_oclass nv50_fifo_oclass; 108extern struct nouveau_oclass *nv50_fifo_oclass;
109extern struct nouveau_oclass nv84_fifo_oclass; 109extern struct nouveau_oclass *nv84_fifo_oclass;
110extern struct nouveau_oclass nvc0_fifo_oclass; 110extern struct nouveau_oclass *nvc0_fifo_oclass;
111extern struct nouveau_oclass nve0_fifo_oclass; 111extern struct nouveau_oclass *nve0_fifo_oclass;
112 112
113void nv04_fifo_intr(struct nouveau_subdev *); 113void nv04_fifo_intr(struct nouveau_subdev *);
114int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *); 114int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
index 1d1a89a06ee4..9b0d938199f6 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
@@ -42,10 +42,13 @@ struct nouveau_mpeg {
42 42
43extern struct nouveau_oclass nv31_mpeg_oclass; 43extern struct nouveau_oclass nv31_mpeg_oclass;
44extern struct nouveau_oclass nv40_mpeg_oclass; 44extern struct nouveau_oclass nv40_mpeg_oclass;
45extern struct nouveau_oclass nv44_mpeg_oclass;
45extern struct nouveau_oclass nv50_mpeg_oclass; 46extern struct nouveau_oclass nv50_mpeg_oclass;
46extern struct nouveau_oclass nv84_mpeg_oclass; 47extern struct nouveau_oclass nv84_mpeg_oclass;
47 48extern struct nouveau_ofuncs nv31_mpeg_ofuncs;
49extern struct nouveau_oclass nv31_mpeg_cclass;
48extern struct nouveau_oclass nv31_mpeg_sclass[]; 50extern struct nouveau_oclass nv31_mpeg_sclass[];
51extern struct nouveau_oclass nv40_mpeg_sclass[];
49void nv31_mpeg_intr(struct nouveau_subdev *); 52void nv31_mpeg_intr(struct nouveau_subdev *);
50void nv31_mpeg_tile_prog(struct nouveau_engine *, int); 53void nv31_mpeg_tile_prog(struct nouveau_engine *, int);
51int nv31_mpeg_init(struct nouveau_object *); 54int nv31_mpeg_init(struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
new file mode 100644
index 000000000000..49b0024910fe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
@@ -0,0 +1,39 @@
1#ifndef __NVKM_PERFMON_H__
2#define __NVKM_PERFMON_H__
3
4#include <core/device.h>
5#include <core/engine.h>
6#include <core/engctx.h>
7#include <core/class.h>
8
9struct nouveau_perfdom;
10struct nouveau_perfctr;
11struct nouveau_perfmon {
12 struct nouveau_engine base;
13
14 struct nouveau_perfctx *context;
15 void *profile_data;
16
17 struct list_head domains;
18 u32 sequence;
19
20 /*XXX: temp for daemon backend */
21 u32 pwr[8];
22 u32 last;
23};
24
25static inline struct nouveau_perfmon *
26nouveau_perfmon(void *obj)
27{
28 return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_PERFMON];
29}
30
31extern struct nouveau_oclass *nv40_perfmon_oclass;
32extern struct nouveau_oclass *nv50_perfmon_oclass;
33extern struct nouveau_oclass *nv84_perfmon_oclass;
34extern struct nouveau_oclass *nva3_perfmon_oclass;
35extern struct nouveau_oclass nvc0_perfmon_oclass;
36extern struct nouveau_oclass nve0_perfmon_oclass;
37extern struct nouveau_oclass nvf0_perfmon_oclass;
38
39#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
index 45799487e573..23a462b50d03 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/software.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -3,19 +3,10 @@
3 3
4#include <core/engine.h> 4#include <core/engine.h>
5#include <core/engctx.h> 5#include <core/engctx.h>
6#include <core/event.h>
7 6
8struct nouveau_software_chan { 7struct nouveau_software_chan {
9 struct nouveau_engctx base; 8 struct nouveau_engctx base;
10 9
11 struct {
12 struct nouveau_eventh event;
13 u32 channel;
14 u32 ctxdma;
15 u64 offset;
16 u32 value;
17 } vblank;
18
19 int (*flip)(void *); 10 int (*flip)(void *);
20 void *flip_data; 11 void *flip_data;
21}; 12};
@@ -50,10 +41,10 @@ struct nouveau_software {
50#define _nouveau_software_init _nouveau_engine_init 41#define _nouveau_software_init _nouveau_engine_init
51#define _nouveau_software_fini _nouveau_engine_fini 42#define _nouveau_software_fini _nouveau_engine_fini
52 43
53extern struct nouveau_oclass nv04_software_oclass; 44extern struct nouveau_oclass *nv04_software_oclass;
54extern struct nouveau_oclass nv10_software_oclass; 45extern struct nouveau_oclass *nv10_software_oclass;
55extern struct nouveau_oclass nv50_software_oclass; 46extern struct nouveau_oclass *nv50_software_oclass;
56extern struct nouveau_oclass nvc0_software_oclass; 47extern struct nouveau_oclass *nvc0_software_oclass;
57 48
58void nv04_software_intr(struct nouveau_subdev *); 49void nv04_software_intr(struct nouveau_subdev *);
59 50
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h
new file mode 100644
index 000000000000..662b20726851
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h
@@ -0,0 +1,29 @@
1#ifndef __NVBIOS_BOOST_H__
2#define __NVBIOS_BOOST_H__
3
4u16 nvbios_boostTe(struct nouveau_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
5
6struct nvbios_boostE {
7 u8 pstate;
8 u32 min;
9 u32 max;
10};
11
12u16 nvbios_boostEe(struct nouveau_bios *, int idx, u8 *, u8 *, u8 *, u8 *);
13u16 nvbios_boostEp(struct nouveau_bios *, int idx, u8 *, u8 *, u8 *, u8 *,
14 struct nvbios_boostE *);
15u16 nvbios_boostEm(struct nouveau_bios *, u8, u8 *, u8 *, u8 *, u8 *,
16 struct nvbios_boostE *);
17
18struct nvbios_boostS {
19 u8 domain;
20 u8 percent;
21 u32 min;
22 u32 max;
23};
24
25u16 nvbios_boostSe(struct nouveau_bios *, int, u16, u8 *, u8 *, u8, u8);
26u16 nvbios_boostSp(struct nouveau_bios *, int, u16, u8 *, u8 *, u8, u8,
27 struct nvbios_boostS *);
28
29#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h
new file mode 100644
index 000000000000..a80a43809883
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h
@@ -0,0 +1,28 @@
1#ifndef __NVBIOS_CSTEP_H__
2#define __NVBIOS_CSTEP_H__
3
4u16 nvbios_cstepTe(struct nouveau_bios *,
5 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz);
6
7struct nvbios_cstepE {
8 u8 pstate;
9 u8 index;
10};
11
12u16 nvbios_cstepEe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
13u16 nvbios_cstepEp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
14 struct nvbios_cstepE *);
15u16 nvbios_cstepEm(struct nouveau_bios *, u8 pstate, u8 *ver, u8 *hdr,
16 struct nvbios_cstepE *);
17
18struct nvbios_cstepX {
19 u32 freq;
20 u8 unkn[2];
21 u8 voltage;
22};
23
24u16 nvbios_cstepXe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
25u16 nvbios_cstepXp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
26 struct nvbios_cstepX *);
27
28#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index 96d3364f6db3..c7b2e586be0b 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -7,7 +7,15 @@ enum dcb_gpio_func_name {
7 DCB_GPIO_TVDAC1 = 0x2d, 7 DCB_GPIO_TVDAC1 = 0x2d,
8 DCB_GPIO_FAN = 0x09, 8 DCB_GPIO_FAN = 0x09,
9 DCB_GPIO_FAN_SENSE = 0x3d, 9 DCB_GPIO_FAN_SENSE = 0x3d,
10 DCB_GPIO_UNUSED = 0xff 10 DCB_GPIO_UNUSED = 0xff,
11 DCB_GPIO_VID0 = 0x04,
12 DCB_GPIO_VID1 = 0x05,
13 DCB_GPIO_VID2 = 0x06,
14 DCB_GPIO_VID3 = 0x1a,
15 DCB_GPIO_VID4 = 0x73,
16 DCB_GPIO_VID5 = 0x74,
17 DCB_GPIO_VID6 = 0x75,
18 DCB_GPIO_VID7 = 0x76,
11}; 19};
12 20
13#define DCB_GPIO_LOG_DIR 0x02 21#define DCB_GPIO_LOG_DIR 0x02
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
index 0b285e99be5a..16ff06ec2a88 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
@@ -3,6 +3,39 @@
3 3
4struct nouveau_bios; 4struct nouveau_bios;
5 5
6u16 nvbios_perf_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
7 u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
8
9struct nvbios_perfE {
10 u8 pstate;
11 u8 fanspeed;
12 u8 voltage;
13 u32 core;
14 u32 shader;
15 u32 memory;
16 u32 vdec;
17 u32 disp;
18 u32 script;
19};
20
21u16 nvbios_perf_entry(struct nouveau_bios *, int idx,
22 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
23u16 nvbios_perfEp(struct nouveau_bios *, int idx,
24 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_perfE *);
25
26struct nvbios_perfS {
27 union {
28 struct {
29 u32 freq;
30 } v40;
31 };
32};
33
34u32 nvbios_perfSe(struct nouveau_bios *, u32 data, int idx,
35 u8 *ver, u8 *hdr, u8 cnt, u8 len);
36u32 nvbios_perfSp(struct nouveau_bios *, u32 data, int idx,
37 u8 *ver, u8 *hdr, u8 cnt, u8 len, struct nvbios_perfS *);
38
6struct nvbios_perf_fan { 39struct nvbios_perf_fan {
7 u32 pwm_divisor; 40 u32 pwm_divisor;
8}; 41};
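
[Editor's note] These parsers follow the usual nvbios layering: *_table() locates the table, *_entry()/*Ep() return the address of entry idx while rewriting hdr/cnt/len to describe that entry's sub-entries, and the *p() variants additionally decode into a struct. A sketch of walking the whole table under that assumption; note the fresh sver/shdr copies per sub-entry call, since the accessors mutate their pointer arguments:

    struct nvbios_perfE perfE;
    u8 ver, hdr, cnt, len;
    u16 data;
    int idx = 0, sidx;

    while ((data = nvbios_perfEp(bios, idx++, &ver, &hdr,
                                 &cnt, &len, &perfE))) {
            /* perfE.pstate, perfE.core, perfE.memory ... now decoded;
             * cnt/len now describe this entry's sub-entries */
            for (sidx = 0; sidx < cnt; sidx++) {
                    struct nvbios_perfS perfS;
                    u8 sver = ver, shdr = hdr;
                    if (nvbios_perfSp(bios, data, sidx, &sver, &shdr,
                                      cnt, len, &perfS)) {
                            /* perfS.v40.freq: per-domain clock (v40+) */
                    }
            }
    }
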
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
new file mode 100644
index 000000000000..bc15e0320877
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
@@ -0,0 +1,11 @@
1#ifndef __NVBIOS_RAMMAP_H__
2#define __NVBIOS_RAMMAP_H__
3
4u16 nvbios_rammap_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
5 u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
6u16 nvbios_rammap_entry(struct nouveau_bios *, int idx,
7 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
8u16 nvbios_rammap_match(struct nouveau_bios *, u16 khz,
9 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
10
11#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
new file mode 100644
index 000000000000..963694b54224
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
@@ -0,0 +1,8 @@
1#ifndef __NVBIOS_TIMING_H__
2#define __NVBIOS_TIMING_H__
3
4u16 nvbios_timing_table(struct nouveau_bios *,
5 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
6u16 nvbios_timing_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
7
8#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h
new file mode 100644
index 000000000000..ad5a8f20e113
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h
@@ -0,0 +1,25 @@
1#ifndef __NVBIOS_VMAP_H__
2#define __NVBIOS_VMAP_H__
3
4struct nouveau_bios;
5
6struct nvbios_vmap {
7};
8
9u16 nvbios_vmap_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
10u16 nvbios_vmap_parse(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
11 struct nvbios_vmap *);
12
13struct nvbios_vmap_entry {
14 u8 unk0;
15 u8 link;
16 u32 min;
17 u32 max;
18 s32 arg[6];
19};
20
21u16 nvbios_vmap_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *len);
22u16 nvbios_vmap_entry_parse(struct nouveau_bios *, int idx, u8 *ver, u8 *len,
23 struct nvbios_vmap_entry *);
24
25#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h
new file mode 100644
index 000000000000..6a11dcd59770
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h
@@ -0,0 +1,27 @@
1#ifndef __NVBIOS_VOLT_H__
2#define __NVBIOS_VOLT_H__
3
4struct nouveau_bios;
5
6struct nvbios_volt {
7 u8 vidmask;
8 u32 min;
9 u32 max;
10 u32 base;
11 s16 step;
12};
13
14u16 nvbios_volt_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
15u16 nvbios_volt_parse(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
16 struct nvbios_volt *);
17
18struct nvbios_volt_entry {
19 u32 voltage;
20 u8 vid;
21};
22
23u16 nvbios_volt_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *len);
24u16 nvbios_volt_entry_parse(struct nouveau_bios *, int idx, u8 *ver, u8 *len,
25 struct nvbios_volt_entry *);
26
27#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
index 7d88ec4a6d06..697f7ce70aab 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
@@ -11,6 +11,8 @@ struct nouveau_bus_intr {
11 11
12struct nouveau_bus { 12struct nouveau_bus {
13 struct nouveau_subdev base; 13 struct nouveau_subdev base;
14 int (*hwsq_exec)(struct nouveau_bus *, u32 *, u32);
15 u32 hwsq_size;
14}; 16};
15 17
16static inline struct nouveau_bus * 18static inline struct nouveau_bus *
@@ -33,9 +35,19 @@ nouveau_bus(void *obj)
33#define _nouveau_bus_init _nouveau_subdev_init 35#define _nouveau_bus_init _nouveau_subdev_init
34#define _nouveau_bus_fini _nouveau_subdev_fini 36#define _nouveau_bus_fini _nouveau_subdev_fini
35 37
36extern struct nouveau_oclass nv04_bus_oclass; 38extern struct nouveau_oclass *nv04_bus_oclass;
37extern struct nouveau_oclass nv31_bus_oclass; 39extern struct nouveau_oclass *nv31_bus_oclass;
38extern struct nouveau_oclass nv50_bus_oclass; 40extern struct nouveau_oclass *nv50_bus_oclass;
39extern struct nouveau_oclass nvc0_bus_oclass; 41extern struct nouveau_oclass *nv94_bus_oclass;
42extern struct nouveau_oclass *nvc0_bus_oclass;
43
44/* interface to sequencer */
45struct nouveau_hwsq;
46int nouveau_hwsq_init(struct nouveau_bus *, struct nouveau_hwsq **);
47int nouveau_hwsq_fini(struct nouveau_hwsq **, bool exec);
48void nouveau_hwsq_wr32(struct nouveau_hwsq *, u32 addr, u32 data);
49void nouveau_hwsq_setf(struct nouveau_hwsq *, u8 flag, int data);
50void nouveau_hwsq_wait(struct nouveau_hwsq *, u8 flag, u8 data);
51void nouveau_hwsq_nsec(struct nouveau_hwsq *, u32 nsec);
40 52
41#endif 53#endif
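
[Editor's note] The hwsq additions expose the hardware sequencer through the bus subdev: callers accumulate a script of register writes, flag updates and delays, and nouveau_hwsq_fini(&hwsq, true) hands the encoded script to hwsq_exec for the hardware to run without host intervention (useful while memory is inaccessible during reclocking). A sketch of the build-then-execute pattern; the register and flag values are illustrative only:

    struct nouveau_hwsq *hwsq;
    int ret;

    ret = nouveau_hwsq_init(pbus, &hwsq);
    if (ret)
            return ret;

    /* nothing reaches the hardware while the script is being built */
    nouveau_hwsq_wr32(hwsq, 0x100210, 0x00000000);
    nouveau_hwsq_setf(hwsq, 0x10, 1);
    nouveau_hwsq_wait(hwsq, 0x00, 0x01);
    nouveau_hwsq_nsec(hwsq, 2000);

    ret = nouveau_hwsq_fini(&hwsq, true);   /* true = execute, false = discard */
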
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index 89ee289097a6..e2675bc0edba 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -7,9 +7,78 @@
7struct nouveau_pll_vals; 7struct nouveau_pll_vals;
8struct nvbios_pll; 8struct nvbios_pll;
9 9
10enum nv_clk_src {
11 nv_clk_src_crystal,
12 nv_clk_src_href,
13
14 nv_clk_src_hclk,
15 nv_clk_src_hclkm3,
16 nv_clk_src_hclkm3d2,
17
18 nv_clk_src_host,
19
20 nv_clk_src_sppll0,
21 nv_clk_src_sppll1,
22
23 nv_clk_src_mpllsrcref,
24 nv_clk_src_mpllsrc,
25 nv_clk_src_mpll,
26 nv_clk_src_mdiv,
27
28 nv_clk_src_core,
29 nv_clk_src_shader,
30
31 nv_clk_src_mem,
32
33 nv_clk_src_gpc,
34 nv_clk_src_rop,
35 nv_clk_src_hubk01,
36 nv_clk_src_hubk06,
37 nv_clk_src_hubk07,
38 nv_clk_src_copy,
39 nv_clk_src_daemon,
40 nv_clk_src_disp,
41 nv_clk_src_vdec,
42
43 nv_clk_src_dom6,
44
45 nv_clk_src_max,
46};
47
48struct nouveau_cstate {
49 struct list_head head;
50 u8 voltage;
51 u32 domain[nv_clk_src_max];
52};
53
54struct nouveau_pstate {
55 struct list_head head;
56 struct list_head list; /* c-states */
57 struct nouveau_cstate base;
58 u8 pstate;
59 u8 fanspeed;
60};
61
10struct nouveau_clock { 62struct nouveau_clock {
11 struct nouveau_subdev base; 63 struct nouveau_subdev base;
12 64
65 struct nouveau_clocks *domains;
66 struct nouveau_pstate bstate;
67
68 struct list_head states;
69 int state_nr;
70
71 int pstate; /* current */
72 int ustate; /* user-requested (-1 disabled, -2 perfmon) */
73 int astate; /* perfmon adjustment (base) */
74 int tstate; /* thermal adjustment (max-) */
75 int dstate; /* display adjustment (min+) */
76
77 int (*read)(struct nouveau_clock *, enum nv_clk_src);
78 int (*calc)(struct nouveau_clock *, struct nouveau_cstate *);
79 int (*prog)(struct nouveau_clock *);
80 void (*tidy)(struct nouveau_clock *);
81
13 /*XXX: die, these are here *only* to support the completely 82 /*XXX: die, these are here *only* to support the completely
14 * bat-shit insane what-was-nouveau_hw.c code 83 * bat-shit insane what-was-nouveau_hw.c code
15 */ 84 */
@@ -25,27 +94,42 @@ nouveau_clock(void *obj)
25 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_CLOCK]; 94 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_CLOCK];
26} 95}
27 96
28#define nouveau_clock_create(p,e,o,d) \ 97struct nouveau_clocks {
29 nouveau_subdev_create((p), (e), (o), 0, "CLOCK", "clock", d) 98 enum nv_clk_src name;
30#define nouveau_clock_destroy(p) \ 99 u8 bios; /* 0xff for none */
31 nouveau_subdev_destroy(&(p)->base) 100#define NVKM_CLK_DOM_FLAG_CORE 0x01
32#define nouveau_clock_init(p) \ 101 u8 flags;
33 nouveau_subdev_init(&(p)->base) 102 const char *mname;
103 int mdiv;
104};
105
106#define nouveau_clock_create(p,e,o,i,d) \
107 nouveau_clock_create_((p), (e), (o), (i), sizeof(**d), (void **)d)
108#define nouveau_clock_destroy(p) ({ \
109 struct nouveau_clock *clk = (p); \
110 _nouveau_clock_dtor(nv_object(clk)); \
111})
112#define nouveau_clock_init(p) ({ \
113 struct nouveau_clock *clk = (p); \
114 _nouveau_clock_init(nv_object(clk)); \
115})
34#define nouveau_clock_fini(p,s) \ 116#define nouveau_clock_fini(p,s) \
35 nouveau_subdev_fini(&(p)->base, (s)) 117 nouveau_subdev_fini(&(p)->base, (s))
36 118
37int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *, 119int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
38 struct nouveau_oclass *, void *, u32, int, void **); 120 struct nouveau_oclass *,
39 121 struct nouveau_clocks *, int, void **);
40#define _nouveau_clock_dtor _nouveau_subdev_dtor 122void _nouveau_clock_dtor(struct nouveau_object *);
41#define _nouveau_clock_init _nouveau_subdev_init 123int _nouveau_clock_init(struct nouveau_object *);
42#define _nouveau_clock_fini _nouveau_subdev_fini 124#define _nouveau_clock_fini _nouveau_subdev_fini
43 125
44extern struct nouveau_oclass nv04_clock_oclass; 126extern struct nouveau_oclass nv04_clock_oclass;
45extern struct nouveau_oclass nv40_clock_oclass; 127extern struct nouveau_oclass nv40_clock_oclass;
46extern struct nouveau_oclass nv50_clock_oclass; 128extern struct nouveau_oclass *nv50_clock_oclass;
129extern struct nouveau_oclass *nv84_clock_oclass;
47extern struct nouveau_oclass nva3_clock_oclass; 130extern struct nouveau_oclass nva3_clock_oclass;
48extern struct nouveau_oclass nvc0_clock_oclass; 131extern struct nouveau_oclass nvc0_clock_oclass;
132extern struct nouveau_oclass nve0_clock_oclass;
49 133
50int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq); 134int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq);
51int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, 135int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
@@ -55,4 +139,9 @@ int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
55int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, 139int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
56 int clk, struct nouveau_pll_vals *); 140 int clk, struct nouveau_pll_vals *);
57 141
142int nouveau_clock_ustate(struct nouveau_clock *, int req);
143int nouveau_clock_astate(struct nouveau_clock *, int req, int rel);
144int nouveau_clock_dstate(struct nouveau_clock *, int req, int rel);
145int nouveau_clock_tstate(struct nouveau_clock *, int req, int rel);
146
58#endif 147#endif
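
[Editor's note] The reworked clock subdev models BIOS performance levels as a list of p-states, each carrying c-states, resolved over a per-chipset table of clock domains; read/calc/prog/tidy are the hooks each implementation fills in. The domains table is what the new 'i' argument to nouveau_clock_create() carries. A sketch of such a table, modeled on the per-chipset arrays this series introduces (entries illustrative):

    static struct nouveau_clocks
    nvxx_domain[] = {
            /* name               bios  flags  mname     mdiv */
            { nv_clk_src_crystal, 0xff },   /* fixed reference, no BIOS entry */
            { nv_clk_src_href,    0xff },
            { nv_clk_src_core,    0xff, 0, "core",   1000 },
            { nv_clk_src_shader,  0xff, 0, "shader", 1000 },
            { nv_clk_src_mem,     0xff, 0, "memory", 1000 },
            { nv_clk_src_max }
    };

    /* in the chipset constructor:
     *   nouveau_clock_create(parent, engine, oclass, nvxx_domain, &priv);
     */
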
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 2e7405084261..8541aa382ff2 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -78,23 +78,28 @@ nouveau_fb(void *obj)
78 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB]; 78 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
79} 79}
80 80
81extern struct nouveau_oclass nv04_fb_oclass; 81extern struct nouveau_oclass *nv04_fb_oclass;
82extern struct nouveau_oclass nv10_fb_oclass; 82extern struct nouveau_oclass *nv10_fb_oclass;
83extern struct nouveau_oclass nv1a_fb_oclass; 83extern struct nouveau_oclass *nv1a_fb_oclass;
84extern struct nouveau_oclass nv20_fb_oclass; 84extern struct nouveau_oclass *nv20_fb_oclass;
85extern struct nouveau_oclass nv25_fb_oclass; 85extern struct nouveau_oclass *nv25_fb_oclass;
86extern struct nouveau_oclass nv30_fb_oclass; 86extern struct nouveau_oclass *nv30_fb_oclass;
87extern struct nouveau_oclass nv35_fb_oclass; 87extern struct nouveau_oclass *nv35_fb_oclass;
88extern struct nouveau_oclass nv36_fb_oclass; 88extern struct nouveau_oclass *nv36_fb_oclass;
89extern struct nouveau_oclass nv40_fb_oclass; 89extern struct nouveau_oclass *nv40_fb_oclass;
90extern struct nouveau_oclass nv41_fb_oclass; 90extern struct nouveau_oclass *nv41_fb_oclass;
91extern struct nouveau_oclass nv44_fb_oclass; 91extern struct nouveau_oclass *nv44_fb_oclass;
92extern struct nouveau_oclass nv46_fb_oclass; 92extern struct nouveau_oclass *nv46_fb_oclass;
93extern struct nouveau_oclass nv47_fb_oclass; 93extern struct nouveau_oclass *nv47_fb_oclass;
94extern struct nouveau_oclass nv49_fb_oclass; 94extern struct nouveau_oclass *nv49_fb_oclass;
95extern struct nouveau_oclass nv4e_fb_oclass; 95extern struct nouveau_oclass *nv4e_fb_oclass;
96extern struct nouveau_oclass nv50_fb_oclass; 96extern struct nouveau_oclass *nv50_fb_oclass;
97extern struct nouveau_oclass nvc0_fb_oclass; 97extern struct nouveau_oclass *nv84_fb_oclass;
98extern struct nouveau_oclass *nva3_fb_oclass;
99extern struct nouveau_oclass *nvaa_fb_oclass;
100extern struct nouveau_oclass *nvaf_fb_oclass;
101extern struct nouveau_oclass *nvc0_fb_oclass;
102extern struct nouveau_oclass *nve0_fb_oclass;
98 103
99struct nouveau_ram { 104struct nouveau_ram {
100 struct nouveau_object base; 105 struct nouveau_object base;
@@ -121,6 +126,17 @@ struct nouveau_ram {
121 int (*get)(struct nouveau_fb *, u64 size, u32 align, 126 int (*get)(struct nouveau_fb *, u64 size, u32 align,
122 u32 size_nc, u32 type, struct nouveau_mem **); 127 u32 size_nc, u32 type, struct nouveau_mem **);
123 void (*put)(struct nouveau_fb *, struct nouveau_mem **); 128 void (*put)(struct nouveau_fb *, struct nouveau_mem **);
129
130 int (*calc)(struct nouveau_fb *, u32 freq);
131 int (*prog)(struct nouveau_fb *);
132 void (*tidy)(struct nouveau_fb *);
133 struct {
134 u8 version;
135 u32 data;
136 u8 size;
137 } rammap, ramcfg, timing;
138 u32 freq;
139 u32 mr[16];
124}; 140};
125 141
126#endif 142#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 7e4e2775f249..9fa5da723871 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -60,13 +60,18 @@ void _nouveau_i2c_port_dtor(struct nouveau_object *);
60#define _nouveau_i2c_port_init nouveau_object_init 60#define _nouveau_i2c_port_init nouveau_object_init
61#define _nouveau_i2c_port_fini nouveau_object_fini 61#define _nouveau_i2c_port_fini nouveau_object_fini
62 62
63struct nouveau_i2c_board_info {
64 struct i2c_board_info dev;
65 u8 udelay; /* set to 0 to use the standard delay */
66};
67
63struct nouveau_i2c { 68struct nouveau_i2c {
64 struct nouveau_subdev base; 69 struct nouveau_subdev base;
65 70
66 struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index); 71 struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
67 struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type); 72 struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
68 int (*identify)(struct nouveau_i2c *, int index, 73 int (*identify)(struct nouveau_i2c *, int index,
69 const char *what, struct i2c_board_info *, 74 const char *what, struct nouveau_i2c_board_info *,
70 bool (*match)(struct nouveau_i2c_port *, 75 bool (*match)(struct nouveau_i2c_port *,
71 struct i2c_board_info *)); 76 struct i2c_board_info *));
72 struct list_head ports; 77 struct list_head ports;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index ce6569f365a7..adc88b73d911 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -11,7 +11,6 @@ struct nouveau_mc_intr {
11 11
12struct nouveau_mc { 12struct nouveau_mc {
13 struct nouveau_subdev base; 13 struct nouveau_subdev base;
14 const struct nouveau_mc_intr *intr_map;
15 bool use_msi; 14 bool use_msi;
16}; 15};
17 16
@@ -21,8 +20,8 @@ nouveau_mc(void *obj)
21 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; 20 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
22} 21}
23 22
24#define nouveau_mc_create(p,e,o,m,d) \ 23#define nouveau_mc_create(p,e,o,d) \
25 nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d) 24 nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
26#define nouveau_mc_destroy(p) ({ \ 25#define nouveau_mc_destroy(p) ({ \
27 struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ 26 struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
28}) 27})
@@ -34,20 +33,24 @@ nouveau_mc(void *obj)
34}) 33})
35 34
36int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, 35int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
37 struct nouveau_oclass *, const struct nouveau_mc_intr *, 36 struct nouveau_oclass *, int, void **);
38 int, void **);
39void _nouveau_mc_dtor(struct nouveau_object *); 37void _nouveau_mc_dtor(struct nouveau_object *);
40int _nouveau_mc_init(struct nouveau_object *); 38int _nouveau_mc_init(struct nouveau_object *);
41int _nouveau_mc_fini(struct nouveau_object *, bool); 39int _nouveau_mc_fini(struct nouveau_object *, bool);
42 40
43extern struct nouveau_oclass nv04_mc_oclass; 41struct nouveau_mc_oclass {
44extern struct nouveau_oclass nv44_mc_oclass; 42 struct nouveau_oclass base;
45extern struct nouveau_oclass nv50_mc_oclass; 43 const struct nouveau_mc_intr *intr;
46extern struct nouveau_oclass nv98_mc_oclass; 44 void (*msi_rearm)(struct nouveau_mc *);
47extern struct nouveau_oclass nvc0_mc_oclass; 45};
48 46
49extern const struct nouveau_mc_intr nv04_mc_intr[]; 47extern struct nouveau_oclass *nv04_mc_oclass;
50int nv04_mc_init(struct nouveau_object *); 48extern struct nouveau_oclass *nv40_mc_oclass;
51int nv50_mc_init(struct nouveau_object *); 49extern struct nouveau_oclass *nv44_mc_oclass;
50extern struct nouveau_oclass *nv50_mc_oclass;
51extern struct nouveau_oclass *nv94_mc_oclass;
52extern struct nouveau_oclass *nv98_mc_oclass;
53extern struct nouveau_oclass *nvc0_mc_oclass;
54extern struct nouveau_oclass *nvc3_mc_oclass;
52 55
53#endif 56#endif
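
[Editor's note] The per-chipset interrupt map moves out of struct nouveau_mc into a wrapping nouveau_mc_oclass, alongside a new msi_rearm hook, which is why the oclass externs become pointers. A sketch of a chipset definition under the new layout; the nvxx_ names, intr bits and rearm register are illustrative, and nv04_mc_ctor is assumed to be the shared constructor (only nv04_mc_init/nv50_mc_init are declared in this hunk):

    static const struct nouveau_mc_intr
    nvxx_mc_intr[] = {
            { 0x00000001, NVDEV_ENGINE_MPEG },
            { 0x00001000, NVDEV_ENGINE_GR },
            { 0x00100000, NVDEV_SUBDEV_TIMER },
            {},
    };

    static void
    nvxx_mc_msi_rearm(struct nouveau_mc *pmc)
    {
            nv_wr08(pmc, 0x088068, 0xff);        /* register illustrative */
    }

    struct nouveau_oclass *
    nvxx_mc_oclass = &(struct nouveau_mc_oclass) {
            .base.handle = NV_SUBDEV(MC, 0x00),  /* placeholder id */
            .base.ofuncs = &(struct nouveau_ofuncs) {
                    .ctor = nv04_mc_ctor,        /* assumed shared ctor */
                    .dtor = _nouveau_mc_dtor,
                    .init = nv04_mc_init,
                    .fini = _nouveau_mc_fini,
            },
            .intr = nvxx_mc_intr,
            .msi_rearm = nvxx_mc_msi_rearm,
    }.base;
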
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
new file mode 100644
index 000000000000..c5c92cbed33f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
@@ -0,0 +1,80 @@
1#ifndef __NOUVEAU_PWR_H__
2#define __NOUVEAU_PWR_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_pwr {
8 struct nouveau_subdev base;
9
10 struct {
11 u32 limit;
12 u32 *data;
13 u32 size;
14 } code;
15
16 struct {
17 u32 limit;
18 u32 *data;
19 u32 size;
20 } data;
21
22 struct {
23 u32 base;
24 u32 size;
25 } send;
26
27 struct {
28 u32 base;
29 u32 size;
30
31 struct work_struct work;
32 wait_queue_head_t wait;
33 u32 process;
34 u32 message;
35 u32 data[2];
36 } recv;
37
38 int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32);
39};
40
41static inline struct nouveau_pwr *
42nouveau_pwr(void *obj)
43{
44 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_PWR];
45}
46
47#define nouveau_pwr_create(p, e, o, d) \
48 nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d)
49#define nouveau_pwr_destroy(p) \
50 nouveau_subdev_destroy(&(p)->base)
51#define nouveau_pwr_init(p) ({ \
52 struct nouveau_pwr *ppwr = (p); \
53 _nouveau_pwr_init(nv_object(ppwr)); \
54})
55#define nouveau_pwr_fini(p,s) ({ \
56 struct nouveau_pwr *ppwr = (p); \
57 _nouveau_pwr_fini(nv_object(ppwr), (s)); \
58})
59
60int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *,
61 struct nouveau_oclass *, int, void **);
62#define _nouveau_pwr_dtor _nouveau_subdev_dtor
63int _nouveau_pwr_init(struct nouveau_object *);
64int _nouveau_pwr_fini(struct nouveau_object *, bool);
65
66extern struct nouveau_oclass nva3_pwr_oclass;
67extern struct nouveau_oclass nvc0_pwr_oclass;
68extern struct nouveau_oclass nvd0_pwr_oclass;
69extern struct nouveau_oclass nv108_pwr_oclass;
70
71/* interface to MEMX process running on PPWR */
72struct nouveau_memx;
73int nouveau_memx_init(struct nouveau_pwr *, struct nouveau_memx **);
74int nouveau_memx_fini(struct nouveau_memx **, bool exec);
75void nouveau_memx_wr32(struct nouveau_memx *, u32 addr, u32 data);
76void nouveau_memx_wait(struct nouveau_memx *,
77 u32 addr, u32 mask, u32 data, u32 nsec);
78void nouveau_memx_nsec(struct nouveau_memx *, u32 nsec);
79
80#endif
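
[Editor's note] PWR drives the PDAEMON falcon: code/data hold the firmware image, send/recv describe the message FIFOs, and message() performs a synchronous request to a firmware process. The MEMX declarations mirror the hwsq interface in bus.h, except the script executes on PPWR itself, letting memory reclocking sequences run while the host is locked out. A sketch of the memx pattern (register values illustrative):

    struct nouveau_memx *memx;
    int ret;

    ret = nouveau_memx_init(ppwr, &memx);
    if (ret)
            return ret;

    /* staged in PPWR's data segment, not executed yet */
    nouveau_memx_wr32(memx, 0x1002d4, 0x00000001);
    nouveau_memx_wait(memx, 0x1002d4, 0x00000001, 0x00000001, 2000);
    nouveau_memx_nsec(memx, 1000);

    ret = nouveau_memx_fini(&memx, true);   /* true = run the script */
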
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index c075998d82e6..69891d4a3fe7 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -71,6 +71,8 @@ void _nouveau_therm_dtor(struct nouveau_object *);
71int _nouveau_therm_init(struct nouveau_object *); 71int _nouveau_therm_init(struct nouveau_object *);
72int _nouveau_therm_fini(struct nouveau_object *, bool); 72int _nouveau_therm_fini(struct nouveau_object *, bool);
73 73
74int nouveau_therm_cstate(struct nouveau_therm *, int, int);
75
74extern struct nouveau_oclass nv40_therm_oclass; 76extern struct nouveau_oclass nv40_therm_oclass;
75extern struct nouveau_oclass nv50_therm_oclass; 77extern struct nouveau_oclass nv50_therm_oclass;
76extern struct nouveau_oclass nv84_therm_oclass; 78extern struct nouveau_oclass nv84_therm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
new file mode 100644
index 000000000000..820b62ffd75b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
@@ -0,0 +1,60 @@
1#ifndef __NOUVEAU_VOLT_H__
2#define __NOUVEAU_VOLT_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_voltage {
8 u32 uv;
9 u8 id;
10};
11
12struct nouveau_volt {
13 struct nouveau_subdev base;
14
15 int (*vid_get)(struct nouveau_volt *);
16 int (*get)(struct nouveau_volt *);
17 int (*vid_set)(struct nouveau_volt *, u8 vid);
18 int (*set)(struct nouveau_volt *, u32 uv);
19 int (*set_id)(struct nouveau_volt *, u8 id, int condition);
20
21 u8 vid_mask;
22 u8 vid_nr;
23 struct {
24 u32 uv;
25 u8 vid;
26 } vid[256];
27};
28
29static inline struct nouveau_volt *
30nouveau_volt(void *obj)
31{
32 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VOLT];
33}
34
35#define nouveau_volt_create(p, e, o, d) \
36 nouveau_volt_create_((p), (e), (o), sizeof(**d), (void **)d)
37#define nouveau_volt_destroy(p) ({ \
38 struct nouveau_volt *v = (p); \
39 _nouveau_volt_dtor(nv_object(v)); \
40})
41#define nouveau_volt_init(p) ({ \
42 struct nouveau_volt *v = (p); \
43 _nouveau_volt_init(nv_object(v)); \
44})
45#define nouveau_volt_fini(p,s) \
46 nouveau_subdev_fini((p), (s))
47
48int nouveau_volt_create_(struct nouveau_object *, struct nouveau_object *,
49 struct nouveau_oclass *, int, void **);
50void _nouveau_volt_dtor(struct nouveau_object *);
51int _nouveau_volt_init(struct nouveau_object *);
52#define _nouveau_volt_fini _nouveau_subdev_fini
53
54extern struct nouveau_oclass nv40_volt_oclass;
55
56int nouveau_voltgpio_init(struct nouveau_volt *);
57int nouveau_voltgpio_get(struct nouveau_volt *);
58int nouveau_voltgpio_set(struct nouveau_volt *, u8);
59
60#endif
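
[Editor's note] VOLT wraps voltage control in a subdev: get()/set() work in microvolts, while the vid_* hooks and vid[] map translate BIOS voltage ids to levels (filled from the bios/volt.h tables above; the GPIO-backed implementation uses the DCB_GPIO_VIDx functions added to bios/gpio.h). A sketch of typical consumer use; 'obj' stands for any object with device access, and the voltage value is illustrative:

    struct nouveau_volt *volt = nouveau_volt(obj);  /* subdev lookup */
    int uv, ret;

    if (volt) {
            uv = volt->get(volt);           /* current voltage, in uV */
            if (uv < 0)
                    return uv;

            ret = volt->set(volt, 900000);  /* 0.9 V; set_id() instead
                                             * targets a BIOS voltage id */
            if (ret)
                    return ret;
    }
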
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c
new file mode 100644
index 000000000000..c1835e591c44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c
@@ -0,0 +1,127 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/boost.h>
28
29u16
30nvbios_boostTe(struct nouveau_bios *bios,
31 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
32{
33 struct bit_entry bit_P;
34 u16 boost = 0x0000;
35
36 if (!bit_entry(bios, 'P', &bit_P)) {
37 if (bit_P.version == 2)
38 boost = nv_ro16(bios, bit_P.offset + 0x30);
39
40 if (boost) {
41 *ver = nv_ro08(bios, boost + 0);
42 switch (*ver) {
43 case 0x11:
44 *hdr = nv_ro08(bios, boost + 1);
45 *cnt = nv_ro08(bios, boost + 5);
46 *len = nv_ro08(bios, boost + 2);
47 *snr = nv_ro08(bios, boost + 4);
48 *ssz = nv_ro08(bios, boost + 3);
49 return boost;
50 default:
51 break;
52 }
53 }
54 }
55
56 return 0x0000;
57}
58
59u16
60nvbios_boostEe(struct nouveau_bios *bios, int idx,
61 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
62{
63 u8 snr, ssz;
64 u16 data = nvbios_boostTe(bios, ver, hdr, cnt, len, &snr, &ssz);
65 if (data && idx < *cnt) {
66 data = data + *hdr + (idx * (*len + (snr * ssz)));
67 *hdr = *len;
68 *cnt = snr;
69 *len = ssz;
70 return data;
71 }
72 return 0x0000;
73}
74
75u16
76nvbios_boostEp(struct nouveau_bios *bios, int idx,
77 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
78{
79 u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
80 memset(info, 0x00, sizeof(*info));
81 if (data) {
82 info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
83 info->min = nv_ro16(bios, data + 0x02) * 1000;
84 info->max = nv_ro16(bios, data + 0x04) * 1000;
85 }
86 return data;
87}
88
89u16
90nvbios_boostEm(struct nouveau_bios *bios, u8 pstate,
91 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
92{
93 u32 data, idx = 0;
94 while ((data = nvbios_boostEp(bios, idx++, ver, hdr, cnt, len, info))) {
95 if (info->pstate == pstate)
96 break;
97 }
98 return data;
99}
100
101u16
102nvbios_boostSe(struct nouveau_bios *bios, int idx,
103 u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len)
104{
105 if (data && idx < cnt) {
106 data = data + *hdr + (idx * len);
107 *hdr = len;
108 return data;
109 }
110 return 0x0000;
111}
112
113u16
114nvbios_boostSp(struct nouveau_bios *bios, int idx,
115 u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len,
116 struct nvbios_boostS *info)
117{
118 data = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);
119 memset(info, 0x00, sizeof(*info));
120 if (data) {
121 info->domain = nv_ro08(bios, data + 0x00);
122 info->percent = nv_ro08(bios, data + 0x01);
123 info->min = nv_ro16(bios, data + 0x02) * 1000;
124 info->max = nv_ro16(bios, data + 0x04) * 1000;
125 }
126 return data;
127}
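
nvbios_boostTe() returns the table geometry, and nvbios_boostEe() then seeks to entry idx, where each entry is a *len-byte header followed by snr sub-entries of ssz bytes. The arithmetic can be checked standalone (all sizes below are made up):

#include <stdio.h>

int main(void)
{
    unsigned table = 0x1000, hdr = 4, len = 6, snr = 2, ssz = 3;
    int idx;
    /* entry idx starts at table + hdr + idx * (len + snr * ssz) */
    for (idx = 0; idx < 3; idx++)
        printf("entry %d at 0x%04x\n", idx,
               table + hdr + idx * (len + snr * ssz));
    return 0;
}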
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c
new file mode 100644
index 000000000000..d3b15327fbfd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/cstep.h>
28
29u16
30nvbios_cstepTe(struct nouveau_bios *bios,
31 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
32{
33 struct bit_entry bit_P;
34 u16 cstep = 0x0000;
35
36 if (!bit_entry(bios, 'P', &bit_P)) {
37 if (bit_P.version == 2)
38 cstep = nv_ro16(bios, bit_P.offset + 0x34);
39
40 if (cstep) {
41 *ver = nv_ro08(bios, cstep + 0);
42 switch (*ver) {
43 case 0x10:
44 *hdr = nv_ro08(bios, cstep + 1);
45 *cnt = nv_ro08(bios, cstep + 3);
46 *len = nv_ro08(bios, cstep + 2);
47 *xnr = nv_ro08(bios, cstep + 5);
48 *xsz = nv_ro08(bios, cstep + 4);
49 return cstep;
50 default:
51 break;
52 }
53 }
54 }
55
56 return 0x0000;
57}
58
59u16
60nvbios_cstepEe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
61{
62 u8 cnt, len, xnr, xsz;
63 u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
64 if (data && idx < cnt) {
65 data = data + *hdr + (idx * len);
66 *hdr = len;
67 return data;
68 }
69 return 0x0000;
70}
71
72u16
73nvbios_cstepEp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
74 struct nvbios_cstepE *info)
75{
76 u16 data = nvbios_cstepEe(bios, idx, ver, hdr);
77 memset(info, 0x00, sizeof(*info));
78 if (data) {
79 info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
80 info->index = nv_ro08(bios, data + 0x03);
81 }
82 return data;
83}
84
85u16
86nvbios_cstepEm(struct nouveau_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
87 struct nvbios_cstepE *info)
88{
89 u32 data, idx = 0;
90 while ((data = nvbios_cstepEp(bios, idx++, ver, hdr, info))) {
91 if (info->pstate == pstate)
92 break;
93 }
94 return data;
95}
96
97u16
98nvbios_cstepXe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
99{
100 u8 cnt, len, xnr, xsz;
101 u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
102 if (data && idx < xnr) {
103 data = data + *hdr + (cnt * len) + (idx * xsz);
104 *hdr = xsz;
105 return data;
106 }
107 return 0x0000;
108}
109
110u16
111nvbios_cstepXp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
112 struct nvbios_cstepX *info)
113{
114 u16 data = nvbios_cstepXe(bios, idx, ver, hdr);
115 memset(info, 0x00, sizeof(*info));
116 if (data) {
117 info->freq = nv_ro16(bios, data + 0x00) * 1000;
118 info->unkn[0] = nv_ro08(bios, data + 0x02);
119 info->unkn[1] = nv_ro08(bios, data + 0x03);
120 info->voltage = nv_ro08(bios, data + 0x04);
121 }
122 return data;
123}
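
The cstep table differs from the boost layout in one detail that nvbios_cstepXe() relies on: the xnr extension entries are stored after all cnt base entries. A standalone check of that offset (sizes invented):

#include <stdio.h>

int main(void)
{
    unsigned table = 0x2000, hdr = 6, cnt = 4, len = 3, xsz = 5;
    int idx;
    /* extension entry idx starts past the base entries */
    for (idx = 0; idx < 2; idx++)
        printf("X entry %d at 0x%04x\n", idx,
               table + hdr + (cnt * len) + (idx * xsz));
    return 0;
}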
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 663853bcca82..7628fe759220 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -89,6 +89,7 @@ nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
 	      struct nvbios_dpout *info)
 {
 	u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
 	if (data && *ver) {
 		info->type = nv_ro16(bios, data + 0x00);
 		info->mask = nv_ro16(bios, data + 0x02);
@@ -99,9 +100,12 @@ nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
 		info->script[0] = nv_ro16(bios, data + 0x06);
 		info->script[1] = nv_ro16(bios, data + 0x08);
 		info->lnkcmp = nv_ro16(bios, data + 0x0a);
-		info->script[2] = nv_ro16(bios, data + 0x0c);
-		info->script[3] = nv_ro16(bios, data + 0x0e);
-		info->script[4] = nv_ro16(bios, data + 0x10);
+		if (*len >= 0x0f) {
+			info->script[2] = nv_ro16(bios, data + 0x0c);
+			info->script[3] = nv_ro16(bios, data + 0x0e);
+		}
+		if (*len >= 0x11)
+			info->script[4] = nv_ro16(bios, data + 0x10);
 		break;
 	case 0x40:
 		info->flags = nv_ro08(bios, data + 0x04);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 57cda2a1437b..420908cb82b6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2180,7 +2180,7 @@ nvbios_init(struct nouveau_subdev *subdev, bool execute)
 	u16 data;
 
 	if (execute)
-		nv_suspend(bios, "running init tables\n");
+		nv_info(bios, "running init tables\n");
 	while (!ret && (data = (init_script(bios, ++i)))) {
 		struct nvbios_init init = {
 			.subdev = subdev,
@@ -2210,5 +2210,5 @@ nvbios_init(struct nouveau_subdev *subdev, bool execute)
 		ret = nvbios_exec(&init);
 	}
 
-	return 0;
+	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
index bcbb056c2887..675e221680aa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
@@ -26,8 +26,9 @@
 #include <subdev/bios/bit.h>
 #include <subdev/bios/perf.h>
 
-static u16
-perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+u16
+nvbios_perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
+		  u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
 {
 	struct bit_entry bit_P;
 	u16 perf = 0x0000;
@@ -38,10 +39,22 @@ perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 			if (perf) {
 				*ver = nv_ro08(bios, perf + 0);
 				*hdr = nv_ro08(bios, perf + 1);
+				if (*ver >= 0x40 && *ver < 0x41) {
+					*cnt = nv_ro08(bios, perf + 5);
+					*len = nv_ro08(bios, perf + 2);
+					*snr = nv_ro08(bios, perf + 4);
+					*ssz = nv_ro08(bios, perf + 3);
+					return perf;
+				} else
+				if (*ver >= 0x20 && *ver < 0x40) {
+					*cnt = nv_ro08(bios, perf + 2);
+					*len = nv_ro08(bios, perf + 3);
+					*snr = nv_ro08(bios, perf + 4);
+					*ssz = nv_ro08(bios, perf + 5);
+					return perf;
+				}
 			}
-		} else
-			nv_error(bios, "unknown offset for perf in BIT P %d\n",
-				 bit_P.version);
+		}
 	}
 
 	if (bios->bmp_offset) {
@@ -50,19 +63,132 @@ perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 		if (perf) {
 			*hdr = nv_ro08(bios, perf + 0);
 			*ver = nv_ro08(bios, perf + 1);
+			*cnt = nv_ro08(bios, perf + 2);
+			*len = nv_ro08(bios, perf + 3);
+			*snr = 0;
+			*ssz = 0;
+			return perf;
 		}
 		}
 	}
 
+	return 0x0000;
+}
+
+u16
+nvbios_perf_entry(struct nouveau_bios *bios, int idx,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u8 snr, ssz;
+	u16 perf = nvbios_perf_table(bios, ver, hdr, cnt, len, &snr, &ssz);
+	if (perf && idx < *cnt) {
+		perf = perf + *hdr + (idx * (*len + (snr * ssz)));
+		*hdr = *len;
+		*cnt = snr;
+		*len = ssz;
+		return perf;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_perfEp(struct nouveau_bios *bios, int idx,
+	      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+	      struct nvbios_perfE *info)
+{
+	u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	info->pstate = nv_ro08(bios, perf + 0x00);
+	switch (!!perf * *ver) {
+	case 0x12:
+	case 0x13:
+	case 0x14:
+		info->core = nv_ro32(bios, perf + 0x01) * 10;
+		info->memory = nv_ro32(bios, perf + 0x05) * 20;
+		info->fanspeed = nv_ro08(bios, perf + 0x37);
+		if (*hdr > 0x38)
+			info->voltage = nv_ro08(bios, perf + 0x38);
+		break;
+	case 0x21:
+	case 0x23:
+	case 0x24:
+		info->fanspeed = nv_ro08(bios, perf + 0x04);
+		info->voltage = nv_ro08(bios, perf + 0x05);
+		info->shader = nv_ro16(bios, perf + 0x06) * 1000;
+		info->core = info->shader + (signed char)
+			     nv_ro08(bios, perf + 0x08) * 1000;
+		switch (nv_device(bios)->chipset) {
+		case 0x49:
+		case 0x4b:
+			info->memory = nv_ro16(bios, perf + 0x0b) * 1000;
+			break;
+		default:
+			info->memory = nv_ro16(bios, perf + 0x0b) * 2000;
+			break;
+		}
+		break;
+	case 0x25:
+		info->fanspeed = nv_ro08(bios, perf + 0x04);
+		info->voltage = nv_ro08(bios, perf + 0x05);
+		info->core = nv_ro16(bios, perf + 0x06) * 1000;
+		info->shader = nv_ro16(bios, perf + 0x0a) * 1000;
+		info->memory = nv_ro16(bios, perf + 0x0c) * 1000;
+		break;
+	case 0x30:
+		info->script = nv_ro16(bios, perf + 0x02);
+	case 0x35:
+		info->fanspeed = nv_ro08(bios, perf + 0x06);
+		info->voltage = nv_ro08(bios, perf + 0x07);
+		info->core = nv_ro16(bios, perf + 0x08) * 1000;
+		info->shader = nv_ro16(bios, perf + 0x0a) * 1000;
+		info->memory = nv_ro16(bios, perf + 0x0c) * 1000;
+		info->vdec = nv_ro16(bios, perf + 0x10) * 1000;
+		info->disp = nv_ro16(bios, perf + 0x14) * 1000;
+		break;
+	case 0x40:
+		info->voltage = nv_ro08(bios, perf + 0x02);
+		break;
+	default:
+		return 0x0000;
+	}
 	return perf;
 }
 
+u32
+nvbios_perfSe(struct nouveau_bios *bios, u32 perfE, int idx,
+	      u8 *ver, u8 *hdr, u8 cnt, u8 len)
+{
+	u32 data = 0x00000000;
+	if (idx < cnt) {
+		data = perfE + *hdr + (idx * len);
+		*hdr = len;
+	}
+	return data;
+}
+
+u32
+nvbios_perfSp(struct nouveau_bios *bios, u32 perfE, int idx,
+	      u8 *ver, u8 *hdr, u8 cnt, u8 len,
+	      struct nvbios_perfS *info)
+{
+	u32 data = nvbios_perfSe(bios, perfE, idx, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!data * *ver) {
+	case 0x40:
+		info->v40.freq = (nv_ro16(bios, data + 0x00) & 0x3fff) * 1000;
+		break;
+	default:
+		break;
+	}
+	return data;
+}
+
 int
 nvbios_perf_fan_parse(struct nouveau_bios *bios,
 		      struct nvbios_perf_fan *fan)
 {
-	u8 ver = 0, hdr = 0, cnt = 0, len = 0;
-	u16 perf = perf_table(bios, &ver, &hdr, &cnt, &len);
+	u8 ver, hdr, cnt, len, snr, ssz;
+	u16 perf = nvbios_perf_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
 	if (!perf)
 		return -ENODEV;
 
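
nvbios_perfEp() and nvbios_perfSp() above both use the switch (!!ptr * *ver) idiom: multiplying the version by !!ptr folds the lookup-failed case into the switch, since a zero product lands in default. A standalone illustration:

#include <stdio.h>

static const char *
classify(unsigned short data, unsigned char ver)
{
    switch (!!data * ver) { /* 0 when data == 0, else ver */
    case 0x40:
        return "version 4.0 entry";
    default:
        return "missing or unsupported";
    }
}

int main(void)
{
    printf("%s\n", classify(0x1234, 0x40)); /* version 4.0 entry */
    printf("%s\n", classify(0x0000, 0x40)); /* missing or unsupported */
    return 0;
}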
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
index f835501203e5..1f76de597d4b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
@@ -114,6 +114,7 @@ pll_map(struct nouveau_bios *bios)
 	switch (nv_device(bios)->card_type) {
 	case NV_04:
 	case NV_10:
+	case NV_11:
 	case NV_20:
 	case NV_30:
 		return nv04_pll_mapping;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
new file mode 100644
index 000000000000..916fa9d302b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
@@ -0,0 +1,88 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/rammap.h>
28
29u16
30nvbios_rammap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
31 u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
32{
33 struct bit_entry bit_P;
34 u16 rammap = 0x0000;
35
36 if (!bit_entry(bios, 'P', &bit_P)) {
37 if (bit_P.version == 2)
38 rammap = nv_ro16(bios, bit_P.offset + 4);
39
40 if (rammap) {
41 *ver = nv_ro08(bios, rammap + 0);
42 switch (*ver) {
43 case 0x10:
44 case 0x11:
45 *hdr = nv_ro08(bios, rammap + 1);
46 *cnt = nv_ro08(bios, rammap + 5);
47 *len = nv_ro08(bios, rammap + 2);
48 *snr = nv_ro08(bios, rammap + 4);
49 *ssz = nv_ro08(bios, rammap + 3);
50 return rammap;
51 default:
52 break;
53 }
54 }
55 }
56
57 return 0x0000;
58}
59
60u16
61nvbios_rammap_entry(struct nouveau_bios *bios, int idx,
62 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
63{
64 u8 snr, ssz;
65 u16 rammap = nvbios_rammap_table(bios, ver, hdr, cnt, len, &snr, &ssz);
66 if (rammap && idx < *cnt) {
67 rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
68 *hdr = *len;
69 *cnt = snr;
70 *len = ssz;
71 return rammap;
72 }
73 return 0x0000;
74}
75
76u16
77nvbios_rammap_match(struct nouveau_bios *bios, u16 khz,
78 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
79{
80 int idx = 0;
81 u32 data;
82 while ((data = nvbios_rammap_entry(bios, idx++, ver, hdr, cnt, len))) {
83 if (khz >= nv_ro16(bios, data + 0x00) &&
84 khz <= nv_ro16(bios, data + 0x02))
85 break;
86 }
87 return data;
88}
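
nvbios_rammap_match() walks entries until the target frequency falls inside the min/max kHz pair stored at offsets 0x00/0x02 of each entry. The selection logic, reduced to standalone C with invented ranges:

#include <stdio.h>

struct range { unsigned short min_khz, max_khz; };

int main(void)
{
    struct range map[] = { { 100, 400 }, { 401, 800 }, { 801, 1200 } };
    unsigned short khz = 650;
    unsigned i;
    for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
        if (khz >= map[i].min_khz && khz <= map[i].max_khz) {
            printf("entry %u matches %u kHz\n", i, khz);
            break;
        }
    }
    return 0;
}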
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
new file mode 100644
index 000000000000..151c2d6aaee8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
@@ -0,0 +1,73 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/timing.h>
28
29u16
30nvbios_timing_table(struct nouveau_bios *bios,
31 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
32{
33 struct bit_entry bit_P;
34 u16 timing = 0x0000;
35
36 if (!bit_entry(bios, 'P', &bit_P)) {
37 if (bit_P.version == 1)
38 timing = nv_ro16(bios, bit_P.offset + 4);
39 else
40 if (bit_P.version == 2)
41 timing = nv_ro16(bios, bit_P.offset + 8);
42
43 if (timing) {
44 *ver = nv_ro08(bios, timing + 0);
45 switch (*ver) {
46 case 0x10:
47 *hdr = nv_ro08(bios, timing + 1);
48 *cnt = nv_ro08(bios, timing + 2);
49 *len = nv_ro08(bios, timing + 3);
50 return timing;
51 case 0x20:
52 *hdr = nv_ro08(bios, timing + 1);
53 *cnt = nv_ro08(bios, timing + 3);
54 *len = nv_ro08(bios, timing + 2);
55 return timing;
56 default:
57 break;
58 }
59 }
60 }
61
62 return 0x0000;
63}
64
65u16
66nvbios_timing_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
67{
68 u8 hdr, cnt;
69 u16 timing = nvbios_timing_table(bios, ver, &hdr, &cnt, len);
70 if (timing && idx < cnt)
71 return timing + hdr + (idx * *len);
72 return 0x0000;
73}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c
new file mode 100644
index 000000000000..f343a1b060e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c
@@ -0,0 +1,112 @@
1/*
2 * Copyright 2012 Nouveau Community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/vmap.h>
28
29u16
30nvbios_vmap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
31{
32 struct bit_entry bit_P;
33 u16 vmap = 0x0000;
34
35 if (!bit_entry(bios, 'P', &bit_P)) {
36 if (bit_P.version == 2) {
37 vmap = nv_ro16(bios, bit_P.offset + 0x20);
38 if (vmap) {
39 *ver = nv_ro08(bios, vmap + 0);
40 switch (*ver) {
41 case 0x10:
42 case 0x20:
43 *hdr = nv_ro08(bios, vmap + 1);
44 *cnt = nv_ro08(bios, vmap + 3);
45 *len = nv_ro08(bios, vmap + 2);
46 return vmap;
47 default:
48 break;
49 }
50 }
51 }
52 }
53
54 return 0x0000;
55}
56
57u16
58nvbios_vmap_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
59 struct nvbios_vmap *info)
60{
61 u16 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);
62 memset(info, 0x00, sizeof(*info));
63 switch (!!vmap * *ver) {
64 case 0x10:
65 case 0x20:
66 break;
67 }
68 return vmap;
69}
70
71u16
72nvbios_vmap_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
73{
74 u8 hdr, cnt;
75 u16 vmap = nvbios_vmap_table(bios, ver, &hdr, &cnt, len);
76 if (vmap && idx < cnt) {
77 vmap = vmap + hdr + (idx * *len);
78 return vmap;
79 }
80 return 0x0000;
81}
82
83u16
84nvbios_vmap_entry_parse(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
85 struct nvbios_vmap_entry *info)
86{
87 u16 vmap = nvbios_vmap_entry(bios, idx, ver, len);
88 memset(info, 0x00, sizeof(*info));
89 switch (!!vmap * *ver) {
90 case 0x10:
91 info->link = 0xff;
92 info->min = nv_ro32(bios, vmap + 0x00);
93 info->max = nv_ro32(bios, vmap + 0x04);
94 info->arg[0] = nv_ro32(bios, vmap + 0x08);
95 info->arg[1] = nv_ro32(bios, vmap + 0x0c);
96 info->arg[2] = nv_ro32(bios, vmap + 0x10);
97 break;
98 case 0x20:
99 info->unk0 = nv_ro08(bios, vmap + 0x00);
100 info->link = nv_ro08(bios, vmap + 0x01);
101 info->min = nv_ro32(bios, vmap + 0x02);
102 info->max = nv_ro32(bios, vmap + 0x06);
103 info->arg[0] = nv_ro32(bios, vmap + 0x0a);
104 info->arg[1] = nv_ro32(bios, vmap + 0x0e);
105 info->arg[2] = nv_ro32(bios, vmap + 0x12);
106 info->arg[3] = nv_ro32(bios, vmap + 0x16);
107 info->arg[4] = nv_ro32(bios, vmap + 0x1a);
108 info->arg[5] = nv_ro32(bios, vmap + 0x1e);
109 break;
110 }
111 return vmap;
112}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c
new file mode 100644
index 000000000000..bb590de4ecb2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c
@@ -0,0 +1,137 @@
1/*
2 * Copyright 2012 Nouveau Community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/volt.h>
28
29u16
30nvbios_volt_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
31{
32 struct bit_entry bit_P;
33 u16 volt = 0x0000;
34
35 if (!bit_entry(bios, 'P', &bit_P)) {
36 if (bit_P.version == 2)
37 volt = nv_ro16(bios, bit_P.offset + 0x0c);
38 else
39 if (bit_P.version == 1)
40 volt = nv_ro16(bios, bit_P.offset + 0x10);
41
42 if (volt) {
43 *ver = nv_ro08(bios, volt + 0);
44 switch (*ver) {
45 case 0x12:
46 *hdr = 5;
47 *cnt = nv_ro08(bios, volt + 2);
48 *len = nv_ro08(bios, volt + 1);
49 return volt;
50 case 0x20:
51 *hdr = nv_ro08(bios, volt + 1);
52 *cnt = nv_ro08(bios, volt + 2);
53 *len = nv_ro08(bios, volt + 3);
54 return volt;
55 case 0x30:
56 case 0x40:
57 case 0x50:
58 *hdr = nv_ro08(bios, volt + 1);
59 *cnt = nv_ro08(bios, volt + 3);
60 *len = nv_ro08(bios, volt + 2);
61 return volt;
62 }
63 }
64 }
65
66 return 0x0000;
67}
68
69u16
70nvbios_volt_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
71 struct nvbios_volt *info)
72{
73 u16 volt = nvbios_volt_table(bios, ver, hdr, cnt, len);
74 memset(info, 0x00, sizeof(*info));
75 switch (!!volt * *ver) {
76 case 0x12:
77 info->vidmask = nv_ro08(bios, volt + 0x04);
78 break;
79 case 0x20:
80 info->vidmask = nv_ro08(bios, volt + 0x05);
81 break;
82 case 0x30:
83 info->vidmask = nv_ro08(bios, volt + 0x04);
84 break;
85 case 0x40:
86 info->base = nv_ro32(bios, volt + 0x04);
87 info->step = nv_ro16(bios, volt + 0x08);
88 info->vidmask = nv_ro08(bios, volt + 0x0b);
89 /*XXX*/
90 info->min = 0;
91 info->max = info->base;
92 break;
93 case 0x50:
94 info->vidmask = nv_ro08(bios, volt + 0x06);
95 info->min = nv_ro32(bios, volt + 0x0a);
96 info->max = nv_ro32(bios, volt + 0x0e);
97 info->base = nv_ro32(bios, volt + 0x12) & 0x00ffffff;
98 info->step = nv_ro16(bios, volt + 0x16);
99 break;
100 }
101 return volt;
102}
103
104u16
105nvbios_volt_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
106{
107 u8 hdr, cnt;
108 u16 volt = nvbios_volt_table(bios, ver, &hdr, &cnt, len);
109 if (volt && idx < cnt) {
110 volt = volt + hdr + (idx * *len);
111 return volt;
112 }
113 return 0x0000;
114}
115
116u16
117nvbios_volt_entry_parse(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
118 struct nvbios_volt_entry *info)
119{
120 u16 volt = nvbios_volt_entry(bios, idx, ver, len);
121 memset(info, 0x00, sizeof(*info));
122 switch (!!volt * *ver) {
123 case 0x12:
124 case 0x20:
125 info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
126 info->vid = nv_ro08(bios, volt + 0x01);
127 break;
128 case 0x30:
129 info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
130 info->vid = nv_ro08(bios, volt + 0x01) >> 2;
131 break;
132 case 0x40:
133 case 0x50:
134 break;
135 }
136 return volt;
137}
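
The base/step pair parsed for the version 0x40 table above suggests a linear VID encoding, uv = base + vid * step; that interpretation is an assumption here (the mapping itself is applied elsewhere in the series). Arithmetic only, with invented values:

#include <stdio.h>

int main(void)
{
    unsigned base = 600000, step = 6250; /* hypothetical, in microvolts */
    unsigned vid;
    for (vid = 0; vid < 4; vid++)
        printf("vid %u -> %u uV\n", vid, base + vid * step);
    return 0;
}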
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c
new file mode 100644
index 000000000000..f757470e2284
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c
@@ -0,0 +1,145 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <subdev/timer.h>
26#include <subdev/bus.h>
27
28struct nouveau_hwsq {
29 struct nouveau_bus *pbus;
30 u32 addr;
31 u32 data;
32 struct {
33 u8 data[512];
34 u8 size;
35 } c;
36};
37
38static void
39hwsq_cmd(struct nouveau_hwsq *hwsq, int size, u8 data[])
40{
41 memcpy(&hwsq->c.data[hwsq->c.size], data, size * sizeof(data[0]));
42 hwsq->c.size += size;
43}
44
45int
46nouveau_hwsq_init(struct nouveau_bus *pbus, struct nouveau_hwsq **phwsq)
47{
48 struct nouveau_hwsq *hwsq;
49
50 hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
51 if (hwsq) {
52 hwsq->pbus = pbus;
53 hwsq->addr = ~0;
54 hwsq->data = ~0;
55 memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data));
56 hwsq->c.size = 0;
57 }
58
59 return hwsq ? 0 : -ENOMEM;
60}
61
62int
63nouveau_hwsq_fini(struct nouveau_hwsq **phwsq, bool exec)
64{
65 struct nouveau_hwsq *hwsq = *phwsq;
66 int ret = 0, i;
67 if (hwsq) {
68 struct nouveau_bus *pbus = hwsq->pbus;
69 hwsq->c.size = (hwsq->c.size + 4) / 4;
70 if (hwsq->c.size <= pbus->hwsq_size) {
71 if (exec)
72 ret = pbus->hwsq_exec(pbus, (u32 *)hwsq->c.data,
73 hwsq->c.size);
74 if (ret)
75 nv_error(pbus, "hwsq exec failed: %d\n", ret);
76 } else {
77 nv_error(pbus, "hwsq ucode too large\n");
78 ret = -ENOSPC;
79 }
80
81 for (i = 0; ret && i < hwsq->c.size; i++)
82 nv_error(pbus, "\t0x%08x\n", ((u32 *)hwsq->c.data)[i]);
83
84 *phwsq = NULL;
85 kfree(hwsq);
86 }
87 return ret;
88}
89
90void
91nouveau_hwsq_wr32(struct nouveau_hwsq *hwsq, u32 addr, u32 data)
92{
93 nv_debug(hwsq->pbus, "R[%06x] = 0x%08x\n", addr, data);
94
95 if (hwsq->data != data) {
96 if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
97 hwsq_cmd(hwsq, 5, (u8[]){ 0xe2, data, data >> 8,
98 data >> 16, data >> 24 });
99 } else {
100 hwsq_cmd(hwsq, 3, (u8[]){ 0x42, data, data >> 8 });
101 }
102 }
103
104 if ((addr & 0xffff0000) != (hwsq->addr & 0xffff0000)) {
105 hwsq_cmd(hwsq, 5, (u8[]){ 0xe0, addr, addr >> 8,
106 addr >> 16, addr >> 24 });
107 } else {
108 hwsq_cmd(hwsq, 3, (u8[]){ 0x40, addr, addr >> 8 });
109 }
110
111 hwsq->addr = addr;
112 hwsq->data = data;
113}
114
115void
116nouveau_hwsq_setf(struct nouveau_hwsq *hwsq, u8 flag, int data)
117{
118 nv_debug(hwsq->pbus, " FLAG[%02x] = %d\n", flag, data);
119 flag += 0x80;
120 if (data >= 0)
121 flag += 0x20;
122 if (data >= 1)
123 flag += 0x20;
124 hwsq_cmd(hwsq, 1, (u8[]){ flag });
125}
126
127void
128nouveau_hwsq_wait(struct nouveau_hwsq *hwsq, u8 flag, u8 data)
129{
130 nv_debug(hwsq->pbus, " WAIT[%02x] = %d\n", flag, data);
131 hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
132}
133
134void
135nouveau_hwsq_nsec(struct nouveau_hwsq *hwsq, u32 nsec)
136{
137 u8 shift = 0, usec = nsec / 1000;
138 while (usec & ~3) {
139 usec >>= 2;
140 shift++;
141 }
142
143 nv_debug(hwsq->pbus, " DELAY = %d ns\n", nsec);
144 hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec });
145}
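
nouveau_hwsq_nsec() squeezes the delay into one opcode byte, a 2-bit mantissa plus a 2-bit shift decoding as usec << (2 * shift), so large delays lose precision. A standalone round-trip of that encoding:

#include <stdio.h>

int main(void)
{
    unsigned nsec = 20000; /* request 20us */
    unsigned char shift = 0, usec = nsec / 1000;
    while (usec & ~3) {
        usec >>= 2;
        shift++;
    }
    printf("opcode 0x%02x decodes to %u us\n",
           0x00 | (shift << 2) | usec,
           (unsigned)usec << (2 * shift)); /* 16us: rounded down */
    return 0;
}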
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h
new file mode 100644
index 000000000000..12176f9c1bc6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h
@@ -0,0 +1,113 @@
1#ifndef __NVKM_BUS_HWSQ_H__
2#define __NVKM_BUS_HWSQ_H__
3
4#include <subdev/bus.h>
5
6struct hwsq {
7 struct nouveau_subdev *subdev;
8 struct nouveau_hwsq *hwsq;
9 int sequence;
10};
11
12struct hwsq_reg {
13 int sequence;
14 bool force;
15 u32 addr[2];
16 u32 data;
17};
18
19static inline struct hwsq_reg
20hwsq_reg2(u32 addr1, u32 addr2)
21{
22 return (struct hwsq_reg) {
23 .sequence = 0,
24 .force = 0,
25 .addr = { addr1, addr2 },
26 .data = 0xdeadbeef,
27 };
28}
29
30static inline struct hwsq_reg
31hwsq_reg(u32 addr)
32{
33 return hwsq_reg2(addr, addr);
34}
35
36static inline int
37hwsq_init(struct hwsq *ram, struct nouveau_subdev *subdev)
38{
39 struct nouveau_bus *pbus = nouveau_bus(subdev);
40 int ret;
41
42 ret = nouveau_hwsq_init(pbus, &ram->hwsq);
43 if (ret)
44 return ret;
45
46 ram->sequence++;
47 ram->subdev = subdev;
48 return 0;
49}
50
51static inline int
52hwsq_exec(struct hwsq *ram, bool exec)
53{
54 int ret = 0;
55 if (ram->subdev) {
56 ret = nouveau_hwsq_fini(&ram->hwsq, exec);
57 ram->subdev = NULL;
58 }
59 return ret;
60}
61
62static inline u32
63hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
64{
65 if (reg->sequence != ram->sequence)
66 reg->data = nv_rd32(ram->subdev, reg->addr[0]);
67 return reg->data;
68}
69
70static inline void
71hwsq_wr32(struct hwsq *ram, struct hwsq_reg *reg, u32 data)
72{
73 reg->sequence = ram->sequence;
74 reg->data = data;
75 if (reg->addr[0] != reg->addr[1])
76 nouveau_hwsq_wr32(ram->hwsq, reg->addr[1], reg->data);
77 nouveau_hwsq_wr32(ram->hwsq, reg->addr[0], reg->data);
78}
79
80static inline void
81hwsq_nuke(struct hwsq *ram, struct hwsq_reg *reg)
82{
83 reg->force = true;
84}
85
86static inline u32
87hwsq_mask(struct hwsq *ram, struct hwsq_reg *reg, u32 mask, u32 data)
88{
89 u32 temp = hwsq_rd32(ram, reg);
90 if (temp != ((temp & ~mask) | data) || reg->force)
91 hwsq_wr32(ram, reg, (temp & ~mask) | data);
92 return temp;
93}
94
95static inline void
96hwsq_setf(struct hwsq *ram, u8 flag, int data)
97{
98 nouveau_hwsq_setf(ram->hwsq, flag, data);
99}
100
101static inline void
102hwsq_wait(struct hwsq *ram, u8 flag, u8 data)
103{
104 nouveau_hwsq_wait(ram->hwsq, flag, data);
105}
106
107static inline void
108hwsq_nsec(struct hwsq *ram, u32 nsec)
109{
110 nouveau_hwsq_nsec(ram->hwsq, nsec);
111}
112
113#endif
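
A hedged sketch of how a reclocking routine might drive the helpers above: registers are shadowed in hwsq_reg structs, hwsq_mask() only queues a write when the masked bits actually change, and hwsq_exec(true) submits the accumulated sequence. The register address is a placeholder, not real hardware:

static int
example_hwsq_script(struct nouveau_subdev *subdev)
{
    struct hwsq ram;
    struct hwsq_reg reg = hwsq_reg(0x100200); /* placeholder address */
    int ret = hwsq_init(&ram, subdev);
    if (ret)
        return ret;
    hwsq_mask(&ram, &reg, 0x00000001, 0x00000001);
    hwsq_nsec(&ram, 2000);
    return hwsq_exec(&ram, true); /* submit the queued sequence */
}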
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
index 8c7f8057a185..23921b5351db 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
@@ -23,11 +23,7 @@
  * Ben Skeggs
  */
 
-#include <subdev/bus.h>
-
-struct nv04_bus_priv {
-	struct nouveau_bus base;
-};
+#include "nv04.h"
 
 static void
 nv04_bus_intr(struct nouveau_subdev *subdev)
@@ -56,10 +52,22 @@ nv04_bus_intr(struct nouveau_subdev *subdev)
 }
 
 static int
+nv04_bus_init(struct nouveau_object *object)
+{
+	struct nv04_bus_priv *priv = (void *)object;
+
+	nv_wr32(priv, 0x001100, 0xffffffff);
+	nv_wr32(priv, 0x001140, 0x00000111);
+
+	return nouveau_bus_init(&priv->base);
+}
+
+int
 nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	      struct nouveau_oclass *oclass, void *data, u32 size,
 	      struct nouveau_object **pobject)
 {
+	struct nv04_bus_impl *impl = (void *)oclass;
 	struct nv04_bus_priv *priv;
 	int ret;
 
@@ -68,28 +76,20 @@ nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	nv_subdev(priv)->intr = nv04_bus_intr;
+	nv_subdev(priv)->intr = impl->intr;
+	priv->base.hwsq_exec = impl->hwsq_exec;
+	priv->base.hwsq_size = impl->hwsq_size;
 	return 0;
 }
 
-static int
-nv04_bus_init(struct nouveau_object *object)
-{
-	struct nv04_bus_priv *priv = (void *)object;
-
-	nv_wr32(priv, 0x001100, 0xffffffff);
-	nv_wr32(priv, 0x001140, 0x00000111);
-
-	return nouveau_bus_init(&priv->base);
-}
-
-struct nouveau_oclass
-nv04_bus_oclass = {
-	.handle = NV_SUBDEV(BUS, 0x04),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_bus_oclass = &(struct nv04_bus_impl) {
+	.base.handle = NV_SUBDEV(BUS, 0x04),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_bus_ctor,
 		.dtor = _nouveau_bus_dtor,
 		.init = nv04_bus_init,
 		.fini = _nouveau_bus_fini,
 	},
-};
+	.intr = nv04_bus_intr,
+}.base;
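
The new export style here, &(struct nv04_bus_impl){ ... }.base, is a file-scope compound literal: the derived impl object is anonymous, and only a pointer to its leading base member is exported, so generic code sees a nouveau_oclass while the ctor casts the oclass pointer back to the impl. The idiom in isolation, with hypothetical types:

#include <stdio.h>

struct base { int handle; };
struct impl { struct base base; int extra; };

/* export points at the first member of an anonymous static object */
struct base *example_oclass = &(struct impl) {
    .base.handle = 0x04,
    .extra = 42,
}.base;

int main(void)
{
    struct impl *impl = (struct impl *)example_oclass;
    printf("handle %#x extra %d\n", impl->base.handle, impl->extra);
    return 0;
}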
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h
new file mode 100644
index 000000000000..4d7602450a20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h
@@ -0,0 +1,23 @@
1#ifndef __NVKM_BUS_NV04_H__
2#define __NVKM_BUS_NV04_H__
3
4#include <subdev/bus.h>
5
6struct nv04_bus_priv {
7 struct nouveau_bus base;
8};
9
10int nv04_bus_ctor(struct nouveau_object *, struct nouveau_object *,
11 struct nouveau_oclass *, void *, u32,
12 struct nouveau_object **);
13int nv50_bus_init(struct nouveau_object *);
14void nv50_bus_intr(struct nouveau_subdev *);
15
16struct nv04_bus_impl {
17 struct nouveau_oclass base;
18 void (*intr)(struct nouveau_subdev *);
19 int (*hwsq_exec)(struct nouveau_bus *, u32 *, u32);
20 u32 hwsq_size;
21};
22
23#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
index 34132aef34e1..94da46f61627 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
@@ -23,11 +23,7 @@
  * Ben Skeggs
  */
 
-#include <subdev/bus.h>
-
-struct nv31_bus_priv {
-	struct nouveau_bus base;
-};
+#include "nv04.h"
 
 static void
 nv31_bus_intr(struct nouveau_subdev *subdev)
@@ -71,7 +67,7 @@ nv31_bus_intr(struct nouveau_subdev *subdev)
 static int
 nv31_bus_init(struct nouveau_object *object)
 {
-	struct nv31_bus_priv *priv = (void *)object;
+	struct nv04_bus_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_bus_init(&priv->base);
@@ -83,30 +79,14 @@ nv31_bus_init(struct nouveau_object *object)
 	return 0;
 }
 
-static int
-nv31_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	      struct nouveau_oclass *oclass, void *data, u32 size,
-	      struct nouveau_object **pobject)
-{
-	struct nv31_bus_priv *priv;
-	int ret;
-
-	ret = nouveau_bus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = nv31_bus_intr;
-	return 0;
-}
-
-struct nouveau_oclass
-nv31_bus_oclass = {
-	.handle = NV_SUBDEV(BUS, 0x31),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv31_bus_ctor,
+struct nouveau_oclass *
+nv31_bus_oclass = &(struct nv04_bus_impl) {
+	.base.handle = NV_SUBDEV(BUS, 0x31),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_bus_ctor,
 		.dtor = _nouveau_bus_dtor,
 		.init = nv31_bus_init,
 		.fini = _nouveau_bus_fini,
 	},
-};
+	.intr = nv31_bus_intr,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
index f5b2117fa8c6..11918f7e2aca 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
@@ -23,13 +23,27 @@
  * Ben Skeggs
  */
 
-#include <subdev/bus.h>
+#include <subdev/timer.h>
 
-struct nv50_bus_priv {
-	struct nouveau_bus base;
-};
+#include "nv04.h"
 
-static void
+static int
+nv50_bus_hwsq_exec(struct nouveau_bus *pbus, u32 *data, u32 size)
+{
+	struct nv50_bus_priv *priv = (void *)pbus;
+	int i;
+
+	nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
+	nv_wr32(pbus, 0x001304, 0x00000000);
+	for (i = 0; i < size; i++)
+		nv_wr32(priv, 0x001400 + (i * 4), data[i]);
+	nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
+	nv_wr32(pbus, 0x00130c, 0x00000003);
+
+	return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
+}
+
+void
 nv50_bus_intr(struct nouveau_subdev *subdev)
 {
 	struct nouveau_bus *pbus = nouveau_bus(subdev);
@@ -61,10 +75,10 @@ nv50_bus_intr(struct nouveau_subdev *subdev)
 	}
 }
 
-static int
+int
 nv50_bus_init(struct nouveau_object *object)
 {
-	struct nv50_bus_priv *priv = (void *)object;
+	struct nv04_bus_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_bus_init(&priv->base);
@@ -76,30 +90,16 @@ nv50_bus_init(struct nouveau_object *object)
 	return 0;
 }
 
-static int
-nv50_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	      struct nouveau_oclass *oclass, void *data, u32 size,
-	      struct nouveau_object **pobject)
-{
-	struct nv50_bus_priv *priv;
-	int ret;
-
-	ret = nouveau_bus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = nv50_bus_intr;
-	return 0;
-}
-
-struct nouveau_oclass
-nv50_bus_oclass = {
-	.handle = NV_SUBDEV(BUS, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv50_bus_ctor,
+struct nouveau_oclass *
+nv50_bus_oclass = &(struct nv04_bus_impl) {
+	.base.handle = NV_SUBDEV(BUS, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_bus_ctor,
 		.dtor = _nouveau_bus_dtor,
 		.init = nv50_bus_init,
 		.fini = _nouveau_bus_fini,
 	},
-};
+	.intr = nv50_bus_intr,
+	.hwsq_exec = nv50_bus_hwsq_exec,
+	.hwsq_size = 64,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c
new file mode 100644
index 000000000000..d3659055fa4b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c
@@ -0,0 +1,59 @@
1/*
2 * Copyright 2012 Nouveau Community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres <martin.peres@labri.fr>
23 * Ben Skeggs
24 */
25
26#include <subdev/timer.h>
27
28#include "nv04.h"
29
30static int
31nv94_bus_hwsq_exec(struct nouveau_bus *pbus, u32 *data, u32 size)
32{
33 struct nv50_bus_priv *priv = (void *)pbus;
34 int i;
35
36 nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
37 nv_wr32(pbus, 0x001304, 0x00000000);
38 nv_wr32(pbus, 0x001318, 0x00000000);
39 for (i = 0; i < size; i++)
40 nv_wr32(priv, 0x080000 + (i * 4), data[i]);
41 nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
42 nv_wr32(pbus, 0x00130c, 0x00000001);
43
44 return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
45}
46
47struct nouveau_oclass *
48nv94_bus_oclass = &(struct nv04_bus_impl) {
49 .base.handle = NV_SUBDEV(BUS, 0x94),
50 .base.ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = nv04_bus_ctor,
52 .dtor = _nouveau_bus_dtor,
53 .init = nv50_bus_init,
54 .fini = _nouveau_bus_fini,
55 },
56 .intr = nv50_bus_intr,
57 .hwsq_exec = nv94_bus_hwsq_exec,
58 .hwsq_size = 128,
59}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
index b192d6246363..73839d7151a7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
@@ -23,11 +23,7 @@
  * Ben Skeggs
  */
 
-#include <subdev/bus.h>
-
-struct nvc0_bus_priv {
-	struct nouveau_bus base;
-};
+#include "nv04.h"
 
 static void
 nvc0_bus_intr(struct nouveau_subdev *subdev)
@@ -60,7 +56,7 @@ nvc0_bus_intr(struct nouveau_subdev *subdev)
 static int
 nvc0_bus_init(struct nouveau_object *object)
 {
-	struct nvc0_bus_priv *priv = (void *)object;
+	struct nv04_bus_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_bus_init(&priv->base);
@@ -72,30 +68,14 @@ nvc0_bus_init(struct nouveau_object *object)
 	return 0;
 }
 
-static int
-nvc0_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	      struct nouveau_oclass *oclass, void *data, u32 size,
-	      struct nouveau_object **pobject)
-{
-	struct nvc0_bus_priv *priv;
-	int ret;
-
-	ret = nouveau_bus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = nvc0_bus_intr;
-	return 0;
-}
-
-struct nouveau_oclass
-nvc0_bus_oclass = {
-	.handle = NV_SUBDEV(BUS, 0xc0),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nvc0_bus_ctor,
+struct nouveau_oclass *
+nvc0_bus_oclass = &(struct nv04_bus_impl) {
+	.base.handle = NV_SUBDEV(BUS, 0xc0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_bus_ctor,
 		.dtor = _nouveau_bus_dtor,
 		.init = nvc0_bus_init,
 		.fini = _nouveau_bus_fini,
 	},
-};
+	.intr = nvc0_bus_intr,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
new file mode 100644
index 000000000000..e2938a21b06f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
@@ -0,0 +1,494 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/option.h>
26
27#include <subdev/clock.h>
28#include <subdev/therm.h>
29#include <subdev/volt.h>
30#include <subdev/fb.h>
31
32#include <subdev/bios.h>
33#include <subdev/bios/boost.h>
34#include <subdev/bios/cstep.h>
35#include <subdev/bios/perf.h>
36
37/******************************************************************************
38 * misc
39 *****************************************************************************/
40static u32
41nouveau_clock_adjust(struct nouveau_clock *clk, bool adjust,
42 u8 pstate, u8 domain, u32 input)
43{
44 struct nouveau_bios *bios = nouveau_bios(clk);
45 struct nvbios_boostE boostE;
46 u8 ver, hdr, cnt, len;
47 u16 data;
48
49 data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
50 if (data) {
51 struct nvbios_boostS boostS;
52 u8 idx = 0, sver, shdr;
53 u16 subd;
54
55 input = max(boostE.min, input);
56 input = min(boostE.max, input);
57 do {
58 sver = ver;
59 shdr = hdr;
60 subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
61 cnt, len, &boostS);
62 if (subd && boostS.domain == domain) {
63 if (adjust)
64 input = input * boostS.percent / 100;
65 input = max(boostS.min, input);
66 input = min(boostS.max, input);
67 break;
68 }
69 } while (subd);
70 }
71
72 return input;
73}
74
75/******************************************************************************
76 * C-States
77 *****************************************************************************/
78static int
79nouveau_cstate_prog(struct nouveau_clock *clk,
80 struct nouveau_pstate *pstate, int cstatei)
81{
82 struct nouveau_therm *ptherm = nouveau_therm(clk);
83 struct nouveau_volt *volt = nouveau_volt(clk);
84 struct nouveau_cstate *cstate;
85 int ret;
86
87 if (!list_empty(&pstate->list)) {
88 cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
89 } else {
90 cstate = &pstate->base;
91 }
92
93 ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1);
94 if (ret && ret != -ENODEV) {
95 nv_error(clk, "failed to raise fan speed: %d\n", ret);
96 return ret;
97 }
98
99 ret = volt->set_id(volt, cstate->voltage, +1);
100 if (ret && ret != -ENODEV) {
101 nv_error(clk, "failed to raise voltage: %d\n", ret);
102 return ret;
103 }
104
105 ret = clk->calc(clk, cstate);
106 if (ret == 0) {
107 ret = clk->prog(clk);
108 clk->tidy(clk);
109 }
110
111 ret = volt->set_id(volt, cstate->voltage, -1);
112 if (ret && ret != -ENODEV)
113 nv_error(clk, "failed to lower voltage: %d\n", ret);
114
115 ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1);
116 if (ret && ret != -ENODEV)
117 nv_error(clk, "failed to lower fan speed: %d\n", ret);
118
119 return 0;
120}
121
122static void
123nouveau_cstate_del(struct nouveau_cstate *cstate)
124{
125 list_del(&cstate->head);
126 kfree(cstate);
127}
128
129static int
130nouveau_cstate_new(struct nouveau_clock *clk, int idx,
131 struct nouveau_pstate *pstate)
132{
133 struct nouveau_bios *bios = nouveau_bios(clk);
134 struct nouveau_clocks *domain = clk->domains;
135 struct nouveau_cstate *cstate = NULL;
136 struct nvbios_cstepX cstepX;
137 u8 ver, hdr;
138 u16 data;
139
140 data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
141 if (!data)
142 return -ENOENT;
143
144 cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
145 if (!cstate)
146 return -ENOMEM;
147
148 *cstate = pstate->base;
149 cstate->voltage = cstepX.voltage;
150
151 while (domain && domain->name != nv_clk_src_max) {
152 if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
153 u32 freq = nouveau_clock_adjust(clk, true,
154 pstate->pstate,
155 domain->bios,
156 cstepX.freq);
157 cstate->domain[domain->name] = freq;
158 }
159 domain++;
160 }
161
162 list_add(&cstate->head, &pstate->list);
163 return 0;
164}
165
166/******************************************************************************
167 * P-States
168 *****************************************************************************/
169static int
170nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei)
171{
172 struct nouveau_fb *pfb = nouveau_fb(clk);
173 struct nouveau_pstate *pstate;
174 int ret, idx = 0;
175
176 list_for_each_entry(pstate, &clk->states, head) {
177 if (idx++ == pstatei)
178 break;
179 }
180
181 nv_debug(clk, "setting performance state %d\n", pstatei);
182 clk->pstate = pstatei;
183
184 if (pfb->ram->calc) {
185 ret = pfb->ram->calc(pfb, pstate->base.domain[nv_clk_src_mem]);
186 if (ret == 0)
187 ret = pfb->ram->prog(pfb);
188 pfb->ram->tidy(pfb);
189 }
190
191 return nouveau_cstate_prog(clk, pstate, 0);
192}
193
194static int
195nouveau_pstate_calc(struct nouveau_clock *clk)
196{
197 int pstate, ret = 0;
198
199 nv_trace(clk, "P %d U %d A %d T %d D %d\n", clk->pstate,
200 clk->ustate, clk->astate, clk->tstate, clk->dstate);
201
202 if (clk->state_nr && clk->ustate != -1) {
203 pstate = (clk->ustate < 0) ? clk->astate : clk->ustate;
204 pstate = min(pstate, clk->state_nr - 1 - clk->tstate);
205 pstate = max(pstate, clk->dstate);
206 } else {
207 pstate = clk->pstate = -1;
208 }
209
210 nv_trace(clk, "-> %d\n", pstate);
211 if (pstate != clk->pstate)
212 ret = nouveau_pstate_prog(clk, pstate);
213 return ret;
214}
215
216static void
217nouveau_pstate_info(struct nouveau_clock *clk, struct nouveau_pstate *pstate)
218{
219 struct nouveau_clocks *clock = clk->domains - 1;
220 struct nouveau_cstate *cstate;
221 char info[3][32] = { "", "", "" };
222 char name[4] = "--";
223 int i = -1;
224
225 if (pstate->pstate != 0xff)
226 snprintf(name, sizeof(name), "%02x", pstate->pstate);
227
228 while ((++clock)->name != nv_clk_src_max) {
229 u32 lo = pstate->base.domain[clock->name];
230 u32 hi = lo;
231 if (hi == 0)
232 continue;
233
234 nv_debug(clk, "%02x: %10d KHz\n", clock->name, lo);
235 list_for_each_entry(cstate, &pstate->list, head) {
236 u32 freq = cstate->domain[clock->name];
237 lo = min(lo, freq);
238 hi = max(hi, freq);
239 nv_debug(clk, "%10d KHz\n", freq);
240 }
241
242 if (clock->mname && ++i < ARRAY_SIZE(info)) {
243 lo /= clock->mdiv;
244 hi /= clock->mdiv;
245 if (lo == hi) {
246 snprintf(info[i], sizeof(info[i]), "%s %d MHz",
247 clock->mname, lo);
248 } else {
249 snprintf(info[i], sizeof(info[i]),
250 "%s %d-%d MHz", clock->mname, lo, hi);
251 }
252 }
253 }
254
255 nv_info(clk, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
256}
257
258static void
259nouveau_pstate_del(struct nouveau_pstate *pstate)
260{
261 struct nouveau_cstate *cstate, *temp;
262
263 list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
264 nouveau_cstate_del(cstate);
265 }
266
267 list_del(&pstate->head);
268 kfree(pstate);
269}
270
271static int
272nouveau_pstate_new(struct nouveau_clock *clk, int idx)
273{
274 struct nouveau_bios *bios = nouveau_bios(clk);
275 struct nouveau_clocks *domain = clk->domains - 1;
276 struct nouveau_pstate *pstate;
277 struct nouveau_cstate *cstate;
278 struct nvbios_cstepE cstepE;
279 struct nvbios_perfE perfE;
280 u8 ver, hdr, cnt, len;
281 u16 data;
282
283 data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
284 if (!data)
285 return -EINVAL;
286 if (perfE.pstate == 0xff)
287 return 0;
288
289 pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
290 if (!pstate)
291 return -ENOMEM;
292 cstate = &pstate->base;
293
294 INIT_LIST_HEAD(&pstate->list);
295
296 pstate->pstate = perfE.pstate;
297 pstate->fanspeed = perfE.fanspeed;
298 cstate->voltage = perfE.voltage;
299 cstate->domain[nv_clk_src_core] = perfE.core;
300 cstate->domain[nv_clk_src_shader] = perfE.shader;
301 cstate->domain[nv_clk_src_mem] = perfE.memory;
302 cstate->domain[nv_clk_src_vdec] = perfE.vdec;
303 cstate->domain[nv_clk_src_dom6] = perfE.disp;
304
305 while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
306 struct nvbios_perfS perfS;
307 u8 sver = ver, shdr = hdr;
308 u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
309 &sver, &shdr, cnt, len, &perfS);
310 if (perfSe == 0 || sver != 0x40)
311 continue;
312
313 if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
314 perfS.v40.freq = nouveau_clock_adjust(clk, false,
315 pstate->pstate,
316 domain->bios,
317 perfS.v40.freq);
318 }
319
320 cstate->domain[domain->name] = perfS.v40.freq;
321 }
322
323 data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
324 if (data) {
325 int idx = cstepE.index;
326 do {
327 nouveau_cstate_new(clk, idx, pstate);
328 } while (idx--);
329 }
330
331 nouveau_pstate_info(clk, pstate);
332 list_add_tail(&pstate->head, &clk->states);
333 clk->state_nr++;
334 return 0;
335}
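
Note the cstep walk a few lines up: starting from cstepE.index, the do/while
post-decrement visits every index down to and including 0. A tiny standalone
illustration of that loop shape:

    #include <stdio.h>

    int main(void)
    {
        int idx = 3; /* stands in for cstepE.index */

        do {
            printf("visit cstep index %d\n", idx);
        } while (idx--);
        return 0;
    }

This prints indices 3, 2, 1 and 0; the condition is evaluated (and idx
decremented) only after each pass through the body.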
336
337/******************************************************************************
338 * Adjustment triggers
339 *****************************************************************************/
340static int
341nouveau_clock_ustate_update(struct nouveau_clock *clk, int req)
342{
343 struct nouveau_pstate *pstate;
344 int i = 0;
345
346 /* YKW repellant: reject user-requested reclocking until it is known safe */
347 return -ENOSYS;
348
349 if (req != -1 && req != -2) {
350 list_for_each_entry(pstate, &clk->states, head) {
351 if (pstate->pstate == req)
352 break;
353 i++;
354 }
355
356 if (pstate->pstate != req)
357 return -EINVAL;
358 req = i;
359 }
360
361 clk->ustate = req;
362 return 0;
363}
364
365int
366nouveau_clock_ustate(struct nouveau_clock *clk, int req)
367{
368 int ret = nouveau_clock_ustate_update(clk, req);
369 if (ret)
370 return ret;
371 return nouveau_pstate_calc(clk);
372}
373
374int
375nouveau_clock_astate(struct nouveau_clock *clk, int req, int rel)
376{
377 if (!rel) clk->astate = req;
378 if ( rel) clk->astate += rel;
379 clk->astate = min(clk->astate, clk->state_nr - 1);
380 clk->astate = max(clk->astate, 0);
381 return nouveau_pstate_calc(clk);
382}
383
384int
385nouveau_clock_tstate(struct nouveau_clock *clk, int req, int rel)
386{
387 if (!rel) clk->tstate = req;
388 if ( rel) clk->tstate += rel;
389 clk->tstate = min(clk->tstate, 0);
390 clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
391 return nouveau_pstate_calc(clk);
392}
393
394int
395nouveau_clock_dstate(struct nouveau_clock *clk, int req, int rel)
396{
397 if (!rel) clk->dstate = req;
398 if ( rel) clk->dstate += rel;
399 clk->dstate = min(clk->dstate, clk->state_nr - 1);
400 clk->dstate = max(clk->dstate, 0);
401 return nouveau_pstate_calc(clk);
402}
403
404/******************************************************************************
405 * subdev base class implementation
406 *****************************************************************************/
407int
408_nouveau_clock_init(struct nouveau_object *object)
409{
410 struct nouveau_clock *clk = (void *)object;
411 struct nouveau_clocks *clock = clk->domains;
412 int ret;
413
414 memset(&clk->bstate, 0x00, sizeof(clk->bstate));
415 INIT_LIST_HEAD(&clk->bstate.list);
416 clk->bstate.pstate = 0xff;
417
418 while (clock->name != nv_clk_src_max) {
419 ret = clk->read(clk, clock->name);
420 if (ret < 0) {
421 nv_error(clk, "%02x freq unknown\n", clock->name);
422 return ret;
423 }
424 clk->bstate.base.domain[clock->name] = ret;
425 clock++;
426 }
427
428 nouveau_pstate_info(clk, &clk->bstate);
429
430 clk->astate = clk->state_nr - 1;
431 clk->tstate = 0;
432 clk->dstate = 0;
433 clk->pstate = -1;
434 nouveau_pstate_calc(clk);
435 return 0;
436}
437
438void
439_nouveau_clock_dtor(struct nouveau_object *object)
440{
441 struct nouveau_clock *clk = (void *)object;
442 struct nouveau_pstate *pstate, *temp;
443
444 list_for_each_entry_safe(pstate, temp, &clk->states, head) {
445 nouveau_pstate_del(pstate);
446 }
447
448 nouveau_subdev_destroy(&clk->base);
449}
450
451int
452nouveau_clock_create_(struct nouveau_object *parent,
453 struct nouveau_object *engine,
454 struct nouveau_oclass *oclass,
455 struct nouveau_clocks *clocks,
456 int length, void **object)
457{
458 struct nouveau_device *device = nv_device(parent);
459 struct nouveau_clock *clk;
460 int ret, idx, arglen;
461 const char *mode;
462
463 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "CLK",
464 "clock", length, object);
465 clk = *object;
466 if (ret)
467 return ret;
468
469 INIT_LIST_HEAD(&clk->states);
470 clk->domains = clocks;
471 clk->ustate = -1;
472
473 idx = 0;
474 do {
475 ret = nouveau_pstate_new(clk, idx++);
476 } while (ret == 0);
477
478 mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen);
479 if (mode) {
480 if (!strncasecmpz(mode, "disabled", arglen)) {
481 clk->ustate = -1;
482 } else {
483 char save = mode[arglen];
484 long v;
485
486 ((char *)mode)[arglen] = '\0';
487 if (!kstrtol(mode, 0, &v))
488 nouveau_clock_ustate_update(clk, v);
489 ((char *)mode)[arglen] = save;
490 }
491 }
492
493 return 0;
494}
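
The NvClkMode handling above operates on an option value that is not
NUL-terminated (arglen bounds it), which is why the code temporarily patches
in a '\0' around kstrtol() and restores the saved byte afterwards. Outside
the kernel the same parse is more naturally done on a bounded copy; a rough
userspace sketch, where the buffer size and fallback value are arbitrary
choices (strncasecmpz in the patch is nouveau's own bounded comparison
helper; strcasecmp on the copy achieves the same effect here):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <strings.h>

    static long parse_clk_mode(const char *mode, size_t arglen, long fallback)
    {
        char buf[32];
        char *end;
        long v;

        if (arglen >= sizeof(buf))
            return fallback;
        memcpy(buf, mode, arglen);
        buf[arglen] = '\0';

        if (!strcasecmp(buf, "disabled"))
            return -1;

        v = strtol(buf, &end, 0);
        if (end == buf || *end != '\0')
            return fallback;
        return v;
    }

    int main(void)
    {
        printf("%ld\n", parse_clk_mode("0x0f", 4, -1)); /* prints 15 */
        return 0;
    }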
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index a14277586595..da50c1b12928 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -77,7 +77,7 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
77 struct nv04_clock_priv *priv; 77 struct nv04_clock_priv *priv;
78 int ret; 78 int ret;
79 79
80 ret = nouveau_clock_create(parent, engine, oclass, &priv); 80 ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv);
81 *pobject = nv_object(priv); 81 *pobject = nv_object(priv);
82 if (ret) 82 if (ret)
83 return ret; 83 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
index 0db5dbfd91b5..db7346f79080 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
@@ -23,11 +23,188 @@
23 */ 23 */
24 24
25#include <subdev/clock.h> 25#include <subdev/clock.h>
26#include <subdev/bios.h>
27#include <subdev/bios/pll.h>
28
29#include "pll.h"
26 30
27struct nv40_clock_priv { 31struct nv40_clock_priv {
28 struct nouveau_clock base; 32 struct nouveau_clock base;
33 u32 ctrl;
34 u32 npll_ctrl;
35 u32 npll_coef;
36 u32 spll;
37};
38
39static struct nouveau_clocks
40nv40_domain[] = {
41 { nv_clk_src_crystal, 0xff },
42 { nv_clk_src_href , 0xff },
43 { nv_clk_src_core , 0xff, 0, "core", 1000 },
44 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
45 { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
46 { nv_clk_src_max }
29}; 47};
30 48
49static u32
50read_pll_1(struct nv40_clock_priv *priv, u32 reg)
51{
52 u32 ctrl = nv_rd32(priv, reg + 0x00);
53 int P = (ctrl & 0x00070000) >> 16;
54 int N = (ctrl & 0x0000ff00) >> 8;
55 int M = (ctrl & 0x000000ff) >> 0;
56 u32 ref = 27000, clk = 0;
57
58 if (ctrl & 0x80000000)
59 clk = ref * N / M;
60
61 return clk >> P;
62}
63
64static u32
65read_pll_2(struct nv40_clock_priv *priv, u32 reg)
66{
67 u32 ctrl = nv_rd32(priv, reg + 0x00);
68 u32 coef = nv_rd32(priv, reg + 0x04);
69 int N2 = (coef & 0xff000000) >> 24;
70 int M2 = (coef & 0x00ff0000) >> 16;
71 int N1 = (coef & 0x0000ff00) >> 8;
72 int M1 = (coef & 0x000000ff) >> 0;
73 int P = (ctrl & 0x00070000) >> 16;
74 u32 ref = 27000, clk = 0;
75
76 if ((ctrl & 0x80000000) && M1) {
77 clk = ref * N1 / M1;
78 if ((ctrl & 0x40000100) == 0x40000000) {
79 if (M2)
80 clk = clk * N2 / M2;
81 else
82 clk = 0;
83 }
84 }
85
86 return clk >> P;
87}
88
89static u32
90read_clk(struct nv40_clock_priv *priv, u32 src)
91{
92 switch (src) {
93 case 3:
94 return read_pll_2(priv, 0x004000);
95 case 2:
96 return read_pll_1(priv, 0x004008);
97 default:
98 break;
99 }
100
101 return 0;
102}
103
104static int
105nv40_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
106{
107 struct nv40_clock_priv *priv = (void *)clk;
108 u32 mast = nv_rd32(priv, 0x00c040);
109
110 switch (src) {
111 case nv_clk_src_crystal:
112 return nv_device(priv)->crystal;
113 case nv_clk_src_href:
114 return 100000; /*XXX: PCIE/AGP differ*/
115 case nv_clk_src_core:
116 return read_clk(priv, (mast & 0x00000003) >> 0);
117 case nv_clk_src_shader:
118 return read_clk(priv, (mast & 0x00000030) >> 4);
119 case nv_clk_src_mem:
120 return read_pll_2(priv, 0x4020);
121 default:
122 break;
123 }
124
125 nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
126 return -EINVAL;
127}
128
129static int
130nv40_clock_calc_pll(struct nv40_clock_priv *priv, u32 reg, u32 clk,
131 int *N1, int *M1, int *N2, int *M2, int *log2P)
132{
133 struct nouveau_bios *bios = nouveau_bios(priv);
134 struct nvbios_pll pll;
135 int ret;
136
137 ret = nvbios_pll_parse(bios, reg, &pll);
138 if (ret)
139 return ret;
140
141 if (clk < pll.vco1.max_freq)
142 pll.vco2.max_freq = 0;
143
144 ret = nv04_pll_calc(nv_subdev(priv), &pll, clk, N1, M1, N2, M2, log2P);
145 if (ret == 0)
146 return -ERANGE;
147 return ret;
148}
149
150static int
151nv40_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
152{
153 struct nv40_clock_priv *priv = (void *)clk;
154 int gclk = cstate->domain[nv_clk_src_core];
155 int sclk = cstate->domain[nv_clk_src_shader];
156 int N1, M1, N2, M2, log2P;
157 int ret;
158
159 /* core/geometric clock */
160 ret = nv40_clock_calc_pll(priv, 0x004000, gclk,
161 &N1, &M1, &N2, &M2, &log2P);
162 if (ret < 0)
163 return ret;
164
165 if (N2 == M2) {
166 priv->npll_ctrl = 0x80000100 | (log2P << 16);
167 priv->npll_coef = (N1 << 8) | M1;
168 } else {
169 priv->npll_ctrl = 0xc0000000 | (log2P << 16);
170 priv->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
171 }
172
173 /* use the second pll for shader/rop clock, if it differs from core */
174 if (sclk && sclk != gclk) {
175 ret = nv40_clock_calc_pll(priv, 0x004008, sclk,
176 &N1, &M1, NULL, NULL, &log2P);
177 if (ret < 0)
178 return ret;
179
180 priv->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
181 priv->ctrl = 0x00000223;
182 } else {
183 priv->spll = 0x00000000;
184 priv->ctrl = 0x00000333;
185 }
186
187 return 0;
188}
189
190static int
191nv40_clock_prog(struct nouveau_clock *clk)
192{
193 struct nv40_clock_priv *priv = (void *)clk;
194 nv_mask(priv, 0x00c040, 0x00000333, 0x00000000);
195 nv_wr32(priv, 0x004004, priv->npll_coef);
196 nv_mask(priv, 0x004000, 0xc0070100, priv->npll_ctrl);
197 nv_mask(priv, 0x004008, 0xc007ffff, priv->spll);
198 mdelay(5);
199 nv_mask(priv, 0x00c040, 0x00000333, priv->ctrl);
200 return 0;
201}
202
203static void
204nv40_clock_tidy(struct nouveau_clock *clk)
205{
206}
207
31static int 208static int
32nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 209nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
33 struct nouveau_oclass *oclass, void *data, u32 size, 210 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -36,13 +213,17 @@ nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
36 struct nv40_clock_priv *priv; 213 struct nv40_clock_priv *priv;
37 int ret; 214 int ret;
38 215
39 ret = nouveau_clock_create(parent, engine, oclass, &priv); 216 ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, &priv);
40 *pobject = nv_object(priv); 217 *pobject = nv_object(priv);
41 if (ret) 218 if (ret)
42 return ret; 219 return ret;
43 220
44 priv->base.pll_calc = nv04_clock_pll_calc; 221 priv->base.pll_calc = nv04_clock_pll_calc;
45 priv->base.pll_prog = nv04_clock_pll_prog; 222 priv->base.pll_prog = nv04_clock_pll_prog;
223 priv->base.read = nv40_clock_read;
224 priv->base.calc = nv40_clock_calc;
225 priv->base.prog = nv40_clock_prog;
226 priv->base.tidy = nv40_clock_tidy;
46 return 0; 227 return 0;
47} 228}
48 229
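
For reference, the readback in read_pll_2() above reduces to
freq = ((ref * N1 / M1) * N2 / M2) >> P, with the second stage applied only
when the cascade is enabled ((ctrl & 0x40000100) == 0x40000000). A standalone
sketch of the arithmetic, with made-up coefficients:

    #include <stdio.h>

    static unsigned int pll2_freq(unsigned int ref, int N1, int M1,
                                  int N2, int M2, int P, int cascade)
    {
        unsigned int clk;

        if (!M1)
            return 0;
        clk = ref * N1 / M1;
        if (cascade) {
            if (!M2)
                return 0;
            clk = clk * N2 / M2;
        }
        return clk >> P;
    }

    int main(void)
    {
        /* 27 MHz crystal (27000 kHz), N1=100, M1=6, no cascade, P=1 */
        printf("%u kHz\n", pll2_freq(27000, 100, 6, 1, 1, 1, 0)); /* 225000 */
        return 0;
    }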
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
index d09d3e78040c..250a6d96016b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -22,40 +22,538 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/clock.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bios/pll.h> 26#include <subdev/bios/pll.h>
28 27
28#include "nv50.h"
29#include "pll.h" 29#include "pll.h"
30#include "seq.h"
30 31
31struct nv50_clock_priv { 32static u32
32 struct nouveau_clock base; 33read_div(struct nv50_clock_priv *priv)
33}; 34{
35 switch (nv_device(priv)->chipset) {
36 case 0x50: /* it exists, but only has bit 31, not the dividers.. */
37 case 0x84:
38 case 0x86:
39 case 0x98:
40 case 0xa0:
41 return nv_rd32(priv, 0x004700);
42 case 0x92:
43 case 0x94:
44 case 0x96:
45 return nv_rd32(priv, 0x004800);
46 default:
47 return 0x00000000;
48 }
49}
50
51static u32
52read_pll_src(struct nv50_clock_priv *priv, u32 base)
53{
54 struct nouveau_clock *clk = &priv->base;
55 u32 coef, ref = clk->read(clk, nv_clk_src_crystal);
56 u32 rsel = nv_rd32(priv, 0x00e18c);
57 int P, N, M, id;
58
59 switch (nv_device(priv)->chipset) {
60 case 0x50:
61 case 0xa0:
62 switch (base) {
63 case 0x4020:
64 case 0x4028: id = !!(rsel & 0x00000004); break;
65 case 0x4008: id = !!(rsel & 0x00000008); break;
66 case 0x4030: id = 0; break;
67 default:
68 nv_error(priv, "ref: bad pll 0x%06x\n", base);
69 return 0;
70 }
71
72 coef = nv_rd32(priv, 0x00e81c + (id * 0x0c));
73 ref *= (coef & 0x01000000) ? 2 : 4;
74 P = (coef & 0x00070000) >> 16;
75 N = ((coef & 0x0000ff00) >> 8) + 1;
76 M = ((coef & 0x000000ff) >> 0) + 1;
77 break;
78 case 0x84:
79 case 0x86:
80 case 0x92:
81 coef = nv_rd32(priv, 0x00e81c);
82 P = (coef & 0x00070000) >> 16;
83 N = (coef & 0x0000ff00) >> 8;
84 M = (coef & 0x000000ff) >> 0;
85 break;
86 case 0x94:
87 case 0x96:
88 case 0x98:
89 rsel = nv_rd32(priv, 0x00c050);
90 switch (base) {
91 case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
92 case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
93 case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
94 case 0x4030: rsel = 3; break;
95 default:
96 nv_error(priv, "ref: bad pll 0x%06x\n", base);
97 return 0;
98 }
99
100 switch (rsel) {
101 case 0: id = 1; break;
102 case 1: return clk->read(clk, nv_clk_src_crystal);
103 case 2: return clk->read(clk, nv_clk_src_href);
104 case 3: id = 0; break;
105 }
106
107 coef = nv_rd32(priv, 0x00e81c + (id * 0x28));
108 P = (nv_rd32(priv, 0x00e824 + (id * 0x28)) >> 16) & 7;
109 P += (coef & 0x00070000) >> 16;
110 N = (coef & 0x0000ff00) >> 8;
111 M = (coef & 0x000000ff) >> 0;
112 break;
113 default:
114 BUG_ON(1);
115 }
116
117 if (M)
118 return (ref * N / M) >> P;
119 return 0;
120}
121
122static u32
123read_pll_ref(struct nv50_clock_priv *priv, u32 base)
124{
125 struct nouveau_clock *clk = &priv->base;
126 u32 src, mast = nv_rd32(priv, 0x00c040);
127
128 switch (base) {
129 case 0x004028:
130 src = !!(mast & 0x00200000);
131 break;
132 case 0x004020:
133 src = !!(mast & 0x00400000);
134 break;
135 case 0x004008:
136 src = !!(mast & 0x00010000);
137 break;
138 case 0x004030:
139 src = !!(mast & 0x02000000);
140 break;
141 case 0x00e810:
142 return clk->read(clk, nv_clk_src_crystal);
143 default:
144 nv_error(priv, "bad pll 0x%06x\n", base);
145 return 0;
146 }
147
148 if (src)
149 return clk->read(clk, nv_clk_src_href);
150 return read_pll_src(priv, base);
151}
152
153static u32
154read_pll(struct nv50_clock_priv *priv, u32 base)
155{
156 struct nouveau_clock *clk = &priv->base;
157 u32 mast = nv_rd32(priv, 0x00c040);
158 u32 ctrl = nv_rd32(priv, base + 0);
159 u32 coef = nv_rd32(priv, base + 4);
160 u32 ref = read_pll_ref(priv, base);
161 u32 freq = 0;
162 int N1, N2, M1, M2;
163
164 if (base == 0x004028 && (mast & 0x00100000)) {
165 /* wtf, appears to only disable post-divider on nva0 */
166 if (nv_device(priv)->chipset != 0xa0)
167 return clk->read(clk, nv_clk_src_dom6);
168 }
169
170 N2 = (coef & 0xff000000) >> 24;
171 M2 = (coef & 0x00ff0000) >> 16;
172 N1 = (coef & 0x0000ff00) >> 8;
173 M1 = (coef & 0x000000ff);
174 if ((ctrl & 0x80000000) && M1) {
175 freq = ref * N1 / M1;
176 if ((ctrl & 0x40000100) == 0x40000000) {
177 if (M2)
178 freq = freq * N2 / M2;
179 else
180 freq = 0;
181 }
182 }
183
184 return freq;
185}
34 186
35static int 187static int
188nv50_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
189{
190 struct nv50_clock_priv *priv = (void *)clk;
191 u32 mast = nv_rd32(priv, 0x00c040);
192 u32 P = 0;
193
194 switch (src) {
195 case nv_clk_src_crystal:
196 return nv_device(priv)->crystal;
197 case nv_clk_src_href:
198 return 100000; /* PCIE reference clock */
199 case nv_clk_src_hclk:
200 return div_u64((u64)clk->read(clk, nv_clk_src_href) * 27778, 10000);
201 case nv_clk_src_hclkm3:
202 return clk->read(clk, nv_clk_src_hclk) * 3;
203 case nv_clk_src_hclkm3d2:
204 return clk->read(clk, nv_clk_src_hclk) * 3 / 2;
205 case nv_clk_src_host:
206 switch (mast & 0x30000000) {
207 case 0x00000000: return clk->read(clk, nv_clk_src_href);
208 case 0x10000000: break;
209 case 0x20000000: /* !0x50 */
210 case 0x30000000: return clk->read(clk, nv_clk_src_hclk);
211 }
212 break;
213 case nv_clk_src_core:
214 if (!(mast & 0x00100000))
215 P = (nv_rd32(priv, 0x004028) & 0x00070000) >> 16;
216 switch (mast & 0x00000003) {
217 case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
218 case 0x00000001: return clk->read(clk, nv_clk_src_dom6);
219 case 0x00000002: return read_pll(priv, 0x004020) >> P;
220 case 0x00000003: return read_pll(priv, 0x004028) >> P;
221 }
222 break;
223 case nv_clk_src_shader:
224 P = (nv_rd32(priv, 0x004020) & 0x00070000) >> 16;
225 switch (mast & 0x00000030) {
226 case 0x00000000:
227 if (mast & 0x00000080)
228 return clk->read(clk, nv_clk_src_host) >> P;
229 return clk->read(clk, nv_clk_src_crystal) >> P;
230 case 0x00000010: break;
231 case 0x00000020: return read_pll(priv, 0x004028) >> P;
232 case 0x00000030: return read_pll(priv, 0x004020) >> P;
233 }
234 break;
235 case nv_clk_src_mem:
236 P = (nv_rd32(priv, 0x004008) & 0x00070000) >> 16;
237 if (nv_rd32(priv, 0x004008) & 0x00000200) {
238 switch (mast & 0x0000c000) {
239 case 0x00000000:
240 return clk->read(clk, nv_clk_src_crystal) >> P;
241 case 0x00008000:
242 case 0x0000c000:
243 return clk->read(clk, nv_clk_src_href) >> P;
244 }
245 } else {
246 return read_pll(priv, 0x004008) >> P;
247 }
248 break;
249 case nv_clk_src_vdec:
250 P = (read_div(priv) & 0x00000700) >> 8;
251 switch (nv_device(priv)->chipset) {
252 case 0x84:
253 case 0x86:
254 case 0x92:
255 case 0x94:
256 case 0x96:
257 case 0xa0:
258 switch (mast & 0x00000c00) {
259 case 0x00000000:
260 if (nv_device(priv)->chipset == 0xa0) /* wtf?? */
261 return clk->read(clk, nv_clk_src_core) >> P;
262 return clk->read(clk, nv_clk_src_crystal) >> P;
263 case 0x00000400:
264 return 0;
265 case 0x00000800:
266 if (mast & 0x01000000)
267 return read_pll(priv, 0x004028) >> P;
268 return read_pll(priv, 0x004030) >> P;
269 case 0x00000c00:
270 return clk->read(clk, nv_clk_src_core) >> P;
271 }
272 break;
273 case 0x98:
274 switch (mast & 0x00000c00) {
275 case 0x00000000:
276 return clk->read(clk, nv_clk_src_core) >> P;
277 case 0x00000400:
278 return 0;
279 case 0x00000800:
280 return clk->read(clk, nv_clk_src_hclkm3d2) >> P;
281 case 0x00000c00:
282 return clk->read(clk, nv_clk_src_mem) >> P;
283 }
284 break;
285 }
286 break;
287 case nv_clk_src_dom6:
288 switch (nv_device(priv)->chipset) {
289 case 0x50:
290 case 0xa0:
291 return read_pll(priv, 0x00e810) >> 2;
292 case 0x84:
293 case 0x86:
294 case 0x92:
295 case 0x94:
296 case 0x96:
297 case 0x98:
298 P = (read_div(priv) & 0x00000007) >> 0;
299 switch (mast & 0x0c000000) {
300 case 0x00000000: return clk->read(clk, nv_clk_src_href);
301 case 0x04000000: break;
302 case 0x08000000: return clk->read(clk, nv_clk_src_hclk);
303 case 0x0c000000:
304 return clk->read(clk, nv_clk_src_hclkm3) >> P;
305 }
306 break;
307 default:
308 break;
309 }
310 default:
311 break;
312 }
313
314 nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
315 return -EINVAL;
316}
317
318static u32
319calc_pll(struct nv50_clock_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P)
320{
321 struct nouveau_bios *bios = nouveau_bios(priv);
322 struct nvbios_pll pll;
323 int ret;
324
325 ret = nvbios_pll_parse(bios, reg, &pll);
326 if (ret)
327 return 0;
328
329 pll.vco2.max_freq = 0;
330 pll.refclk = read_pll_ref(priv, reg);
331 if (!pll.refclk)
332 return 0;
333
334 return nv04_pll_calc(nv_subdev(priv), &pll, clk, N, M, NULL, NULL, P);
335}
336
337static inline u32
338calc_div(u32 src, u32 target, int *div)
339{
340 u32 clk0 = src, clk1 = src;
341 for (*div = 0; *div <= 7; (*div)++) {
342 if (clk0 <= target) {
343 clk1 = clk0 << (*div ? 1 : 0);
344 break;
345 }
346 clk0 >>= 1;
347 }
348
349 if (target - clk0 <= clk1 - target)
350 return clk0;
351 (*div)--;
352 return clk1;
353}
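
calc_div() above scans the eight power-of-two dividers and returns whichever
of the two frequencies bracketing the target is nearer, with ties going to
the lower one; clk1 = clk0 << (*div ? 1 : 0) ensures the div-0 case cannot
round up past the source. A standalone copy for experimenting with inputs:

    #include <stdio.h>

    static unsigned int nearest_div(unsigned int src, unsigned int target,
                                    int *div)
    {
        unsigned int clk0 = src, clk1 = src;

        for (*div = 0; *div <= 7; (*div)++) {
            if (clk0 <= target) {
                clk1 = clk0 << (*div ? 1 : 0);
                break;
            }
            clk0 >>= 1;
        }

        if (target - clk0 <= clk1 - target)
            return clk0;
        (*div)--;
        return clk1;
    }

    int main(void)
    {
        int div;
        /* 400000 kHz source, 150000 kHz target: candidates are 100000
         * (div 2) and 200000 (div 1); the tie goes to the lower one.
         */
        unsigned int out = nearest_div(400000, 150000, &div);

        printf("%u kHz, div %d\n", out, div); /* 100000 kHz, div 2 */
        return 0;
    }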
354
355static inline u32
356clk_same(u32 a, u32 b)
357{
358 return ((a / 1000) == (b / 1000));
359}
360
361static int
362nv50_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
363{
364 struct nv50_clock_priv *priv = (void *)clk;
365 struct nv50_clock_hwsq *hwsq = &priv->hwsq;
366 const int shader = cstate->domain[nv_clk_src_shader];
367 const int core = cstate->domain[nv_clk_src_core];
368 const int vdec = cstate->domain[nv_clk_src_vdec];
369 const int dom6 = cstate->domain[nv_clk_src_dom6];
370 u32 mastm = 0, mastv = 0;
371 u32 divsm = 0, divsv = 0;
372 int N, M, P1, P2;
373 int freq, out;
374
375 /* prepare a hwsq script from which we'll perform the reclock */
376 out = clk_init(hwsq, nv_subdev(clk));
377 if (out)
378 return out;
379
380 clk_wr32(hwsq, fifo, 0x00000001); /* block fifo */
381 clk_nsec(hwsq, 8000);
382 clk_setf(hwsq, 0x10, 0x00); /* disable fb */
383 clk_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
384
385 /* vdec: avoid modifying xpll until we know exactly how the other
386 * clock domains work; I suspect at least some of them can also be
387 * tied to xpll...
388 */
389 if (vdec) {
390 /* see how close we can get using nvclk as a source */
391 freq = calc_div(core, vdec, &P1);
392
393 /* see how close we can get using xpll/hclk as a source */
394 if (nv_device(priv)->chipset != 0x98)
395 out = read_pll(priv, 0x004030);
396 else
397 out = clk->read(clk, nv_clk_src_hclkm3d2);
398 out = calc_div(out, vdec, &P2);
399
400 /* select whichever gets us closest */
401 if (abs(vdec - freq) <= abs(vdec - out)) {
402 if (nv_device(priv)->chipset != 0x98)
403 mastv |= 0x00000c00;
404 divsv |= P1 << 8;
405 } else {
406 mastv |= 0x00000800;
407 divsv |= P2 << 8;
408 }
409
410 mastm |= 0x00000c00;
411 divsm |= 0x00000700;
412 }
413
414 /* dom6: nfi what this is, but we're limited to various combinations
415 * of the host clock frequency
416 */
417 if (dom6) {
418 if (clk_same(dom6, clk->read(clk, nv_clk_src_href))) {
419 mastv |= 0x00000000;
420 } else
421 if (clk_same(dom6, clk->read(clk, nv_clk_src_hclk))) {
422 mastv |= 0x08000000;
423 } else {
424 freq = clk->read(clk, nv_clk_src_hclk) * 3;
425 freq = calc_div(freq, dom6, &P1);
426
427 mastv |= 0x0c000000;
428 divsv |= P1;
429 }
430
431 mastm |= 0x0c000000;
432 divsm |= 0x00000007;
433 }
434
435 /* vdec/dom6: switch to "safe" clocks temporarily, update dividers
436 * and then switch to target clocks
437 */
438 clk_mask(hwsq, mast, mastm, 0x00000000);
439 clk_mask(hwsq, divs, divsm, divsv);
440 clk_mask(hwsq, mast, mastm, mastv);
441
442 /* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6,
443 * sclk to hclk) before reprogramming
444 */
445 if (nv_device(priv)->chipset < 0x92)
446 clk_mask(hwsq, mast, 0x001000b0, 0x00100080);
447 else
448 clk_mask(hwsq, mast, 0x000000b3, 0x00000081);
449
450 /* core: for the moment at least, always use nvpll */
451 freq = calc_pll(priv, 0x4028, core, &N, &M, &P1);
452 if (freq == 0)
453 return -ERANGE;
454
455 clk_mask(hwsq, nvpll[0], 0xc03f0100,
456 0x80000000 | (P1 << 19) | (P1 << 16));
457 clk_mask(hwsq, nvpll[1], 0x0000ffff, (N << 8) | M);
458
459 /* shader: tie to nvclk if possible, otherwise use spll. have to be
460 * very careful that the shader clock is at least twice the core, or
461 * some chipsets will be very unhappy. I expect most or all of these
462 * cases will be handled by tying to nvclk, but there may be corner
463 * cases
464 */
465 if (P1-- && shader == (core << 1)) {
466 clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16));
467 clk_mask(hwsq, mast, 0x00100033, 0x00000023);
468 } else {
469 freq = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
470 if (freq == 0)
471 return -ERANGE;
472
473 clk_mask(hwsq, spll[0], 0xc03f0100,
474 0x80000000 | (P1 << 19) | (P1 << 16));
475 clk_mask(hwsq, spll[1], 0x0000ffff, (N << 8) | M);
476 clk_mask(hwsq, mast, 0x00100033, 0x00000033);
477 }
478
479 /* restore normal operation */
480 clk_setf(hwsq, 0x10, 0x01); /* enable fb */
481 clk_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
482 clk_wr32(hwsq, fifo, 0x00000000); /* un-block fifo */
483 return 0;
484}
485
486static int
487nv50_clock_prog(struct nouveau_clock *clk)
488{
489 struct nv50_clock_priv *priv = (void *)clk;
490 return clk_exec(&priv->hwsq, true);
491}
492
493static void
494nv50_clock_tidy(struct nouveau_clock *clk)
495{
496 struct nv50_clock_priv *priv = (void *)clk;
497 clk_exec(&priv->hwsq, false);
498}
499
500int
36nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 501nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
37 struct nouveau_oclass *oclass, void *data, u32 size, 502 struct nouveau_oclass *oclass, void *data, u32 size,
38 struct nouveau_object **pobject) 503 struct nouveau_object **pobject)
39{ 504{
505 struct nv50_clock_oclass *pclass = (void *)oclass;
40 struct nv50_clock_priv *priv; 506 struct nv50_clock_priv *priv;
41 int ret; 507 int ret;
42 508
43 ret = nouveau_clock_create(parent, engine, oclass, &priv); 509 ret = nouveau_clock_create(parent, engine, oclass, pclass->domains,
510 &priv);
44 *pobject = nv_object(priv); 511 *pobject = nv_object(priv);
45 if (ret) 512 if (ret)
46 return ret; 513 return ret;
47 514
48 priv->base.pll_calc = nv04_clock_pll_calc; 515 priv->hwsq.r_fifo = hwsq_reg(0x002504);
516 priv->hwsq.r_spll[0] = hwsq_reg(0x004020);
517 priv->hwsq.r_spll[1] = hwsq_reg(0x004024);
518 priv->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
519 priv->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
520 switch (nv_device(priv)->chipset) {
521 case 0x92:
522 case 0x94:
523 case 0x96:
524 priv->hwsq.r_divs = hwsq_reg(0x004800);
525 break;
526 default:
527 priv->hwsq.r_divs = hwsq_reg(0x004700);
528 break;
529 }
530 priv->hwsq.r_mast = hwsq_reg(0x00c040);
531
532 priv->base.read = nv50_clock_read;
533 priv->base.calc = nv50_clock_calc;
534 priv->base.prog = nv50_clock_prog;
535 priv->base.tidy = nv50_clock_tidy;
49 return 0; 536 return 0;
50} 537}
51 538
52struct nouveau_oclass 539static struct nouveau_clocks
53nv50_clock_oclass = { 540nv50_domains[] = {
54 .handle = NV_SUBDEV(CLOCK, 0x50), 541 { nv_clk_src_crystal, 0xff },
55 .ofuncs = &(struct nouveau_ofuncs) { 542 { nv_clk_src_href , 0xff },
543 { nv_clk_src_core , 0xff, 0, "core", 1000 },
544 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
545 { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
546 { nv_clk_src_max }
547};
548
549struct nouveau_oclass *
550nv50_clock_oclass = &(struct nv50_clock_oclass) {
551 .base.handle = NV_SUBDEV(CLOCK, 0x50),
552 .base.ofuncs = &(struct nouveau_ofuncs) {
56 .ctor = nv50_clock_ctor, 553 .ctor = nv50_clock_ctor,
57 .dtor = _nouveau_clock_dtor, 554 .dtor = _nouveau_clock_dtor,
58 .init = _nouveau_clock_init, 555 .init = _nouveau_clock_init,
59 .fini = _nouveau_clock_fini, 556 .fini = _nouveau_clock_fini,
60 }, 557 },
61}; 558 .domains = nv50_domains,
559}.base;
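
Unlike nv40, the nv50 path never pokes the clock registers directly from
calc(): the sequence is recorded into a hwsq script, which prog() then asks
the hardware sequencer to execute (clk_exec(..., true)) and tidy() discards
(clk_exec(..., false)). A toy record-then-commit buffer, with hypothetical
names, showing the shape of that pattern:

    #include <stdio.h>

    struct op { unsigned int reg, val; };

    struct script {
        struct op ops[16];
        int nr;
    };

    static void script_wr32(struct script *s, unsigned int reg, unsigned int val)
    {
        if (s->nr < 16)
            s->ops[s->nr++] = (struct op){ reg, val };
    }

    static void script_exec(struct script *s, int commit)
    {
        int i;

        if (commit) {
            for (i = 0; i < s->nr; i++)
                printf("wr32(0x%06x, 0x%08x)\n",
                       s->ops[i].reg, s->ops[i].val);
        }
        s->nr = 0; /* consumed either way, like clk_exec(hwsq, false) */
    }

    int main(void)
    {
        struct script s = { .nr = 0 };

        script_wr32(&s, 0x004028, 0x80010203);
        script_exec(&s, 1);
        return 0;
    }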
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h
new file mode 100644
index 000000000000..f10917d789e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h
@@ -0,0 +1,31 @@
1#ifndef __NVKM_CLK_NV50_H__
2#define __NVKM_CLK_NV50_H__
3
4#include <subdev/bus.h>
5#include <subdev/bus/hwsq.h>
6#include <subdev/clock.h>
7
8struct nv50_clock_hwsq {
9 struct hwsq base;
10 struct hwsq_reg r_fifo;
11 struct hwsq_reg r_spll[2];
12 struct hwsq_reg r_nvpll[2];
13 struct hwsq_reg r_divs;
14 struct hwsq_reg r_mast;
15};
16
17struct nv50_clock_priv {
18 struct nouveau_clock base;
19 struct nv50_clock_hwsq hwsq;
20};
21
22int nv50_clock_ctor(struct nouveau_object *, struct nouveau_object *,
23 struct nouveau_oclass *, void *, u32,
24 struct nouveau_object **);
25
26struct nv50_clock_oclass {
27 struct nouveau_oclass base;
28 struct nouveau_clocks *domains;
29};
30
31#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c
new file mode 100644
index 000000000000..b0b7c1437f10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "nv50.h"
26
27static struct nouveau_clocks
28nv84_domains[] = {
29 { nv_clk_src_crystal, 0xff },
30 { nv_clk_src_href , 0xff },
31 { nv_clk_src_core , 0xff, 0, "core", 1000 },
32 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
33 { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
34 { nv_clk_src_vdec , 0xff },
35 { nv_clk_src_max }
36};
37
38struct nouveau_oclass *
39nv84_clock_oclass = &(struct nv50_clock_oclass) {
40 .base.handle = NV_SUBDEV(CLOCK, 0x84),
41 .base.ofuncs = &(struct nouveau_ofuncs) {
42 .ctor = nv50_clock_ctor,
43 .dtor = _nouveau_clock_dtor,
44 .init = _nouveau_clock_init,
45 .fini = _nouveau_clock_fini,
46 },
47 .domains = nv84_domains,
48}.base;
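
nv84_clock_oclass above (like nv50_clock_oclass before it) exports a pointer
to the first member of an unnamed file-scope compound literal, so a single
constructor can recover per-chipset data (.domains) by casting back to the
wrapper type. A minimal sketch of the idiom with hypothetical types:

    #include <stdio.h>

    struct base { const char *name; };

    struct derived {
        struct base base; /* must be first for the cast below */
        int extra;
    };

    /* exported pointer targets .base inside an unnamed static object */
    static struct base *obj = &(struct derived) {
        .base  = { .name = "demo" },
        .extra = 42,
    }.base;

    int main(void)
    {
        /* the shared ctor recovers the wrapper via the first-member rule */
        struct derived *d = (struct derived *)obj;

        printf("%s %d\n", d->base.name, d->extra);
        return 0;
    }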
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index f074cd20bc9c..4f5a1373f002 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -22,33 +22,277 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/clock.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bios/pll.h> 26#include <subdev/bios/pll.h>
27#include <subdev/timer.h>
28 28
29#include "pll.h" 29#include "pll.h"
30 30
31#include "nva3.h"
32
31struct nva3_clock_priv { 33struct nva3_clock_priv {
32 struct nouveau_clock base; 34 struct nouveau_clock base;
35 struct nva3_clock_info eng[nv_clk_src_max];
33}; 36};
34 37
38static u32 read_clk(struct nva3_clock_priv *, int, bool);
39static u32 read_pll(struct nva3_clock_priv *, int, u32);
40
41static u32
42read_vco(struct nva3_clock_priv *priv, int clk)
43{
44 u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
45 if ((sctl & 0x00000030) != 0x00000030)
46 return read_pll(priv, 0x41, 0x00e820);
47 return read_pll(priv, 0x42, 0x00e8a0);
48}
49
50static u32
51read_clk(struct nva3_clock_priv *priv, int clk, bool ignore_en)
52{
53 u32 sctl, sdiv, sclk;
54
55 /* refclk for the 0xe8xx plls is a fixed frequency */
56 if (clk >= 0x40) {
57 if (nv_device(priv)->chipset == 0xaf) {
58 /* no joke.. seriously.. sigh.. */
59 return nv_rd32(priv, 0x00471c) * 1000;
60 }
61
62 return nv_device(priv)->crystal;
63 }
64
65 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
66 if (!ignore_en && !(sctl & 0x00000100))
67 return 0;
68
69 switch (sctl & 0x00003000) {
70 case 0x00000000:
71 return nv_device(priv)->crystal;
72 case 0x00002000:
73 if (sctl & 0x00000040)
74 return 108000;
75 return 100000;
76 case 0x00003000:
77 sclk = read_vco(priv, clk);
78 sdiv = ((sctl & 0x003f0000) >> 16) + 2;
79 return (sclk * 2) / sdiv;
80 default:
81 return 0;
82 }
83}
84
85static u32
86read_pll(struct nva3_clock_priv *priv, int clk, u32 pll)
87{
88 u32 ctrl = nv_rd32(priv, pll + 0);
89 u32 sclk = 0, P = 1, N = 1, M = 1;
90
91 if (!(ctrl & 0x00000008)) {
92 if (ctrl & 0x00000001) {
93 u32 coef = nv_rd32(priv, pll + 4);
94 M = (coef & 0x000000ff) >> 0;
95 N = (coef & 0x0000ff00) >> 8;
96 P = (coef & 0x003f0000) >> 16;
97
98 /* no post-divider on these.. */
99 if ((pll & 0x00ff00) == 0x00e800)
100 P = 1;
101
102 sclk = read_clk(priv, 0x00 + clk, false);
103 }
104 } else {
105 sclk = read_clk(priv, 0x10 + clk, false);
106 }
107
108 if (M * P)
109 return sclk * N / (M * P);
110 return 0;
111}
112
113static int
114nva3_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
115{
116 struct nva3_clock_priv *priv = (void *)clk;
117
118 switch (src) {
119 case nv_clk_src_crystal:
120 return nv_device(priv)->crystal;
121 case nv_clk_src_href:
122 return 100000;
123 case nv_clk_src_core:
124 return read_pll(priv, 0x00, 0x4200);
125 case nv_clk_src_shader:
126 return read_pll(priv, 0x01, 0x4220);
127 case nv_clk_src_mem:
128 return read_pll(priv, 0x02, 0x4000);
129 case nv_clk_src_disp:
130 return read_clk(priv, 0x20, false);
131 case nv_clk_src_vdec:
132 return read_clk(priv, 0x21, false);
133 case nv_clk_src_daemon:
134 return read_clk(priv, 0x25, false);
135 default:
136 nv_error(clk, "invalid clock source %d\n", src);
137 return -EINVAL;
138 }
139}
140
35int 141int
36nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info, 142nva3_clock_info(struct nouveau_clock *clock, int clk, u32 pll, u32 khz,
37 int clk, struct nouveau_pll_vals *pv) 143 struct nva3_clock_info *info)
38{ 144{
39 int ret, N, M, P; 145 struct nouveau_bios *bios = nouveau_bios(clock);
146 struct nva3_clock_priv *priv = (void *)clock;
147 struct nvbios_pll limits;
148 u32 oclk, sclk, sdiv;
149 int P, N, M, diff;
150 int ret;
151
152 info->pll = 0;
153 info->clk = 0;
154
155 switch (khz) {
156 case 27000:
157 info->clk = 0x00000100;
158 return khz;
159 case 100000:
160 info->clk = 0x00002100;
161 return khz;
162 case 108000:
163 info->clk = 0x00002140;
164 return khz;
165 default:
166 sclk = read_vco(priv, clk);
167 sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
168 /* if the clock has a PLL attached, and a divider alone can get
169 * to within [-2, 3) MHz of the target, we'll disable the PLL and
170 * use the divider instead.
171 *
172 * divider can go as low as 2, limited here because NVIDIA
173 * and the VBIOS on my NVA8 seem to prefer using the PLL
174 * for 810MHz - is there a good reason?
175 */
176 if (sdiv > 4) {
177 oclk = (sclk * 2) / sdiv;
178 diff = khz - oclk;
179 if (!pll || (diff >= -2000 && diff < 3000)) {
180 info->clk = (((sdiv - 2) << 16) | 0x00003100);
181 return oclk;
182 }
183 }
184
185 if (!pll)
186 return -ERANGE;
187 break;
188 }
40 189
41 ret = nva3_pll_calc(nv_subdev(clock), info, clk, &N, NULL, &M, &P); 190 ret = nvbios_pll_parse(bios, pll, &limits);
191 if (ret)
192 return ret;
193
194 limits.refclk = read_clk(priv, clk - 0x10, true);
195 if (!limits.refclk)
196 return -EINVAL;
42 197
43 if (ret > 0) { 198 ret = nva3_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P);
44 pv->refclk = info->refclk; 199 if (ret >= 0) {
45 pv->N1 = N; 200 info->clk = nv_rd32(priv, 0x4120 + (clk * 4));
46 pv->M1 = M; 201 info->pll = (P << 16) | (N << 8) | M;
47 pv->log2P = P;
48 } 202 }
203
204 return ret ? ret : -ERANGE;
205}
206
207static int
208calc_clk(struct nva3_clock_priv *priv, struct nouveau_cstate *cstate,
209 int clk, u32 pll, int idx)
210{
211 int ret = nva3_clock_info(&priv->base, clk, pll, cstate->domain[idx],
212 &priv->eng[idx]);
213 if (ret >= 0)
214 return 0;
49 return ret; 215 return ret;
50} 216}
51 217
218static void
219prog_pll(struct nva3_clock_priv *priv, int clk, u32 pll, int idx)
220{
221 struct nva3_clock_info *info = &priv->eng[idx];
222 const u32 src0 = 0x004120 + (clk * 4);
223 const u32 src1 = 0x004160 + (clk * 4);
224 const u32 ctrl = pll + 0;
225 const u32 coef = pll + 4;
226
227 if (info->pll) {
228 nv_mask(priv, src0, 0x00000101, 0x00000101);
229 nv_wr32(priv, coef, info->pll);
230 nv_mask(priv, ctrl, 0x00000015, 0x00000015);
231 nv_mask(priv, ctrl, 0x00000010, 0x00000000);
232 nv_wait(priv, ctrl, 0x00020000, 0x00020000);
233 nv_mask(priv, ctrl, 0x00000010, 0x00000010);
234 nv_mask(priv, ctrl, 0x00000008, 0x00000000);
235 nv_mask(priv, src1, 0x00000100, 0x00000000);
236 nv_mask(priv, src1, 0x00000001, 0x00000000);
237 } else {
238 nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk);
239 nv_mask(priv, ctrl, 0x00000018, 0x00000018);
240 udelay(20);
241 nv_mask(priv, ctrl, 0x00000001, 0x00000000);
242 nv_mask(priv, src0, 0x00000100, 0x00000000);
243 nv_mask(priv, src0, 0x00000001, 0x00000000);
244 }
245}
246
247static void
248prog_clk(struct nva3_clock_priv *priv, int clk, int idx)
249{
250 struct nva3_clock_info *info = &priv->eng[idx];
251 nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk);
252}
253
254static int
255nva3_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
256{
257 struct nva3_clock_priv *priv = (void *)clk;
258 int ret;
259
260 if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
261 (ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
262 (ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
263 (ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec)))
264 return ret;
265
266 return 0;
267}
268
269static int
270nva3_clock_prog(struct nouveau_clock *clk)
271{
272 struct nva3_clock_priv *priv = (void *)clk;
273 prog_pll(priv, 0x00, 0x004200, nv_clk_src_core);
274 prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader);
275 prog_clk(priv, 0x20, nv_clk_src_disp);
276 prog_clk(priv, 0x21, nv_clk_src_vdec);
277 return 0;
278}
279
280static void
281nva3_clock_tidy(struct nouveau_clock *clk)
282{
283}
284
285static struct nouveau_clocks
286nva3_domain[] = {
287 { nv_clk_src_crystal, 0xff },
288 { nv_clk_src_href , 0xff },
289 { nv_clk_src_core , 0x00, 0, "core", 1000 },
290 { nv_clk_src_shader , 0x01, 0, "shader", 1000 },
291 { nv_clk_src_mem , 0x02, 0, "memory", 1000 },
292 { nv_clk_src_vdec , 0x03 },
293 { nv_clk_src_disp , 0x04 },
294 { nv_clk_src_max }
295};
52 296
53static int 297static int
54nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 298nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -58,12 +302,15 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
58 struct nva3_clock_priv *priv; 302 struct nva3_clock_priv *priv;
59 int ret; 303 int ret;
60 304
61 ret = nouveau_clock_create(parent, engine, oclass, &priv); 305 ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, &priv);
62 *pobject = nv_object(priv); 306 *pobject = nv_object(priv);
63 if (ret) 307 if (ret)
64 return ret; 308 return ret;
65 309
66 priv->base.pll_calc = nva3_clock_pll_calc; 310 priv->base.read = nva3_clock_read;
311 priv->base.calc = nva3_clock_calc;
312 priv->base.prog = nva3_clock_prog;
313 priv->base.tidy = nva3_clock_tidy;
67 return 0; 314 return 0;
68} 315}
69 316
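
The policy in nva3_clock_info() above is divider-first: compute
sdiv = min((vco * 2) / (khz - 2999), 65), and if sdiv > 4 lands within
[-2000, +3000) kHz of the request (or the clock has no PLL at all), program
the divider and skip the PLL. A rough standalone version of just that
decision, covering only the case where a PLL exists as fallback; the register
bits are copied from the patch, the input values are illustrative:

    #include <stdio.h>

    static int try_divider(unsigned int vco, unsigned int khz,
                           unsigned int *clk_reg)
    {
        unsigned int sdiv = (vco * 2) / (khz - 2999);
        int diff;

        if (sdiv > 65)
            sdiv = 65;
        if (sdiv <= 4)
            return -1; /* divider too coarse, use the PLL */

        diff = (int)(khz - (vco * 2) / sdiv);
        if (diff >= -2000 && diff < 3000) {
            *clk_reg = ((sdiv - 2) << 16) | 0x00003100;
            return (vco * 2) / sdiv;
        }
        return -1; /* outside the window, use the PLL */
    }

    int main(void)
    {
        unsigned int reg = 0;
        int out = try_divider(810000, 270000, &reg);

        printf("%d kHz (clk 0x%08x)\n", out, reg); /* 270000, 0x00043100 */
        return 0;
    }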
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
new file mode 100644
index 000000000000..6229a509b42e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
@@ -0,0 +1,14 @@
1#ifndef __NVKM_CLK_NVA3_H__
2#define __NVKM_CLK_NVA3_H__
3
4#include <subdev/clock.h>
5
6struct nva3_clock_info {
7 u32 clk;
8 u32 pll;
9};
10
11int nva3_clock_info(struct nouveau_clock *, int, u32, u32,
12 struct nva3_clock_info *);
13
14#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index 439d81c26130..c3105720ed24 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -25,11 +25,408 @@
25#include <subdev/clock.h> 25#include <subdev/clock.h>
26#include <subdev/bios.h> 26#include <subdev/bios.h>
27#include <subdev/bios/pll.h> 27#include <subdev/bios/pll.h>
28#include <subdev/timer.h>
28 29
29#include "pll.h" 30#include "pll.h"
30 31
32struct nvc0_clock_info {
33 u32 freq;
34 u32 ssel;
35 u32 mdiv;
36 u32 dsrc;
37 u32 ddiv;
38 u32 coef;
39};
40
31struct nvc0_clock_priv { 41struct nvc0_clock_priv {
32 struct nouveau_clock base; 42 struct nouveau_clock base;
43 struct nvc0_clock_info eng[16];
44};
45
46static u32 read_div(struct nvc0_clock_priv *, int, u32, u32);
47
48static u32
49read_vco(struct nvc0_clock_priv *priv, u32 dsrc)
50{
51 struct nouveau_clock *clk = &priv->base;
52 u32 ssrc = nv_rd32(priv, dsrc);
53 if (!(ssrc & 0x00000100))
54 return clk->read(clk, nv_clk_src_sppll0);
55 return clk->read(clk, nv_clk_src_sppll1);
56}
57
58static u32
59read_pll(struct nvc0_clock_priv *priv, u32 pll)
60{
61 struct nouveau_clock *clk = &priv->base;
62 u32 ctrl = nv_rd32(priv, pll + 0x00);
63 u32 coef = nv_rd32(priv, pll + 0x04);
64 u32 P = (coef & 0x003f0000) >> 16;
65 u32 N = (coef & 0x0000ff00) >> 8;
66 u32 M = (coef & 0x000000ff) >> 0;
67 u32 sclk;
68
69 if (!(ctrl & 0x00000001))
70 return 0;
71
72 switch (pll) {
73 case 0x00e800:
74 case 0x00e820:
75 sclk = nv_device(priv)->crystal;
76 P = 1;
77 break;
78 case 0x132000:
79 sclk = clk->read(clk, nv_clk_src_mpllsrc);
80 break;
81 case 0x132020:
82 sclk = clk->read(clk, nv_clk_src_mpllsrcref);
83 break;
84 case 0x137000:
85 case 0x137020:
86 case 0x137040:
87 case 0x1370e0:
88 sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
89 break;
90 default:
91 return 0;
92 }
93
94 return sclk * N / M / P;
95}
96
97static u32
98read_div(struct nvc0_clock_priv *priv, int doff, u32 dsrc, u32 dctl)
99{
100 u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
101 u32 sctl = nv_rd32(priv, dctl + (doff * 4));
102
103 switch (ssrc & 0x00000003) {
104 case 0:
105 if ((ssrc & 0x00030000) != 0x00030000)
106 return nv_device(priv)->crystal;
107 return 108000;
108 case 2:
109 return 100000;
110 case 3:
111 if (sctl & 0x80000000) {
112 u32 sclk = read_vco(priv, dsrc + (doff * 4));
113 u32 sdiv = (sctl & 0x0000003f) + 2;
114 return (sclk * 2) / sdiv;
115 }
116
117 return read_vco(priv, dsrc + (doff * 4));
118 default:
119 return 0;
120 }
121}
122
123static u32
124read_clk(struct nvc0_clock_priv *priv, int clk)
125{
126 u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
127 u32 ssel = nv_rd32(priv, 0x137100);
128 u32 sclk, sdiv;
129
130 if (ssel & (1 << clk)) {
131 if (clk < 7)
132 sclk = read_pll(priv, 0x137000 + (clk * 0x20));
133 else
134 sclk = read_pll(priv, 0x1370e0);
135 sdiv = ((sctl & 0x00003f00) >> 8) + 2;
136 } else {
137 sclk = read_div(priv, clk, 0x137160, 0x1371d0);
138 sdiv = ((sctl & 0x0000003f) >> 0) + 2;
139 }
140
141 if (sctl & 0x80000000)
142 return (sclk * 2) / sdiv;
143
144 return sclk;
145}
146
147static int
148nvc0_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
149{
150 struct nouveau_device *device = nv_device(clk);
151 struct nvc0_clock_priv *priv = (void *)clk;
152
153 switch (src) {
154 case nv_clk_src_crystal:
155 return device->crystal;
156 case nv_clk_src_href:
157 return 100000;
158 case nv_clk_src_sppll0:
159 return read_pll(priv, 0x00e800);
160 case nv_clk_src_sppll1:
161 return read_pll(priv, 0x00e820);
162
163 case nv_clk_src_mpllsrcref:
164 return read_div(priv, 0, 0x137320, 0x137330);
165 case nv_clk_src_mpllsrc:
166 return read_pll(priv, 0x132020);
167 case nv_clk_src_mpll:
168 return read_pll(priv, 0x132000);
169 case nv_clk_src_mdiv:
170 return read_div(priv, 0, 0x137300, 0x137310);
171 case nv_clk_src_mem:
172 if (nv_rd32(priv, 0x1373f0) & 0x00000002)
173 return clk->read(clk, nv_clk_src_mpll);
174 return clk->read(clk, nv_clk_src_mdiv);
175
176 case nv_clk_src_gpc:
177 return read_clk(priv, 0x00);
178 case nv_clk_src_rop:
179 return read_clk(priv, 0x01);
180 case nv_clk_src_hubk07:
181 return read_clk(priv, 0x02);
182 case nv_clk_src_hubk06:
183 return read_clk(priv, 0x07);
184 case nv_clk_src_hubk01:
185 return read_clk(priv, 0x08);
186 case nv_clk_src_copy:
187 return read_clk(priv, 0x09);
188 case nv_clk_src_daemon:
189 return read_clk(priv, 0x0c);
190 case nv_clk_src_vdec:
191 return read_clk(priv, 0x0e);
192 default:
193 nv_error(clk, "invalid clock source %d\n", src);
194 return -EINVAL;
195 }
196}
197
198static u32
199calc_div(struct nvc0_clock_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
200{
201 u32 div = min((ref * 2) / freq, (u32)65);
202 if (div < 2)
203 div = 2;
204
205 *ddiv = div - 2;
206 return (ref * 2) / div;
207}
208
209static u32
210calc_src(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
211{
212 u32 sclk;
213
214 /* use one of the fixed frequencies if possible */
215 *ddiv = 0x00000000;
216 switch (freq) {
217 case 27000:
218 case 108000:
219 *dsrc = 0x00000000;
220 if (freq == 108000)
221 *dsrc |= 0x00030000;
222 return freq;
223 case 100000:
224 *dsrc = 0x00000002;
225 return freq;
226 default:
227 *dsrc = 0x00000003;
228 break;
229 }
230
231 /* otherwise, calculate the closest divider */
232 sclk = read_vco(priv, 0x137160 + (clk * 4));
233 if (clk < 7)
234 sclk = calc_div(priv, clk, sclk, freq, ddiv);
235 return sclk;
236}
237
238static u32
239calc_pll(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *coef)
240{
241 struct nouveau_bios *bios = nouveau_bios(priv);
242 struct nvbios_pll limits;
243 int N, M, P, ret;
244
245 ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
246 if (ret)
247 return 0;
248
249 limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
250 if (!limits.refclk)
251 return 0;
252
253 ret = nva3_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
254 if (ret <= 0)
255 return 0;
256
257 *coef = (P << 16) | (N << 8) | M;
258 return ret;
259}
260
261static int
262calc_clk(struct nvc0_clock_priv *priv,
263 struct nouveau_cstate *cstate, int clk, int dom)
264{
265 struct nvc0_clock_info *info = &priv->eng[clk];
266 u32 freq = cstate->domain[dom];
267 u32 src0, div0, div1D, div1P = 0;
268 u32 clk0, clk1 = 0;
269
270 /* invalid clock domain */
271 if (!freq)
272 return 0;
273
274 /* first possible path, using only dividers */
275 clk0 = calc_src(priv, clk, freq, &src0, &div0);
276 clk0 = calc_div(priv, clk, clk0, freq, &div1D);
277
278 /* see if we can get any closer using PLLs */
279 if (clk0 != freq && (0x00004387 & (1 << clk))) {
280 if (clk <= 7)
281 clk1 = calc_pll(priv, clk, freq, &info->coef);
282 else
283 clk1 = cstate->domain[nv_clk_src_hubk06];
284 clk1 = calc_div(priv, clk, clk1, freq, &div1P);
285 }
286
287 /* select the method which gets closest to target freq */
288 if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
289 info->dsrc = src0;
290 if (div0) {
291 info->ddiv |= 0x80000000;
292 info->ddiv |= div0 << 8;
293 info->ddiv |= div0;
294 }
295 if (div1D) {
296 info->mdiv |= 0x80000000;
297 info->mdiv |= div1D;
298 }
299 info->ssel = info->coef = 0;
300 info->freq = clk0;
301 } else {
302 if (div1P) {
303 info->mdiv |= 0x80000000;
304 info->mdiv |= div1P << 8;
305 }
306 info->ssel = (1 << clk);
307 info->freq = clk1;
308 }
309
310 return 0;
311}
312
313static int
314nvc0_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
315{
316 struct nvc0_clock_priv *priv = (void *)clk;
317 int ret;
318
319 if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
320 (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
321 (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
322 (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
323 (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
324 (ret = calc_clk(priv, cstate, 0x09, nv_clk_src_copy)) ||
325 (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
326 (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
327 return ret;
328
329 return 0;
330}
331
332static void
333nvc0_clock_prog_0(struct nvc0_clock_priv *priv, int clk)
334{
335 struct nvc0_clock_info *info = &priv->eng[clk];
336 if (clk < 7 && !info->ssel) {
337 nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
338 nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
339 }
340}
341
342static void
343nvc0_clock_prog_1(struct nvc0_clock_priv *priv, int clk)
344{
345 nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
346 nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
347}
348
349static void
350nvc0_clock_prog_2(struct nvc0_clock_priv *priv, int clk)
351{
352 struct nvc0_clock_info *info = &priv->eng[clk];
353 const u32 addr = 0x137000 + (clk * 0x20);
354 if (clk <= 7) {
355 nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
356 nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
357 if (info->coef) {
358 nv_wr32(priv, addr + 0x04, info->coef);
359 nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
360 nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
361 nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
362 }
363 }
364}
365
366static void
367nvc0_clock_prog_3(struct nvc0_clock_priv *priv, int clk)
368{
369 struct nvc0_clock_info *info = &priv->eng[clk];
370 if (info->ssel) {
371 nv_mask(priv, 0x137100, (1 << clk), info->ssel);
372 nv_wait(priv, 0x137100, (1 << clk), info->ssel);
373 }
374}
375
376static void
377nvc0_clock_prog_4(struct nvc0_clock_priv *priv, int clk)
378{
379 struct nvc0_clock_info *info = &priv->eng[clk];
380 nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
381}
382
383static int
384nvc0_clock_prog(struct nouveau_clock *clk)
385{
386 struct nvc0_clock_priv *priv = (void *)clk;
387 struct {
388 void (*exec)(struct nvc0_clock_priv *, int);
389 } stage[] = {
390 { nvc0_clock_prog_0 }, /* div programming */
391 { nvc0_clock_prog_1 }, /* select div mode */
392 { nvc0_clock_prog_2 }, /* (maybe) program pll */
393 { nvc0_clock_prog_3 }, /* (maybe) select pll mode */
394 { nvc0_clock_prog_4 }, /* final divider */
395 };
396 int i, j;
397
398 for (i = 0; i < ARRAY_SIZE(stage); i++) {
399 for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
400 if (!priv->eng[j].freq)
401 continue;
402 stage[i].exec(priv, j);
403 }
404 }
405
406 return 0;
407}
408
409static void
410nvc0_clock_tidy(struct nouveau_clock *clk)
411{
412 struct nvc0_clock_priv *priv = (void *)clk;
413 memset(priv->eng, 0x00, sizeof(priv->eng));
414}
415
416static struct nouveau_clocks
417nvc0_domain[] = {
418 { nv_clk_src_crystal, 0xff },
419 { nv_clk_src_href , 0xff },
420 { nv_clk_src_hubk06 , 0x00 },
421 { nv_clk_src_hubk01 , 0x01 },
422 { nv_clk_src_copy , 0x02 },
423 { nv_clk_src_gpc , 0x03, 0, "core", 2000 },
424 { nv_clk_src_rop , 0x04 },
425 { nv_clk_src_mem , 0x05, 0, "memory", 1000 },
426 { nv_clk_src_vdec , 0x06 },
427 { nv_clk_src_daemon , 0x0a },
428 { nv_clk_src_hubk07 , 0x0b },
429 { nv_clk_src_max }
33}; 430};
34 431
35static int 432static int
@@ -40,12 +437,15 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
40 struct nvc0_clock_priv *priv; 437 struct nvc0_clock_priv *priv;
41 int ret; 438 int ret;
42 439
43 ret = nouveau_clock_create(parent, engine, oclass, &priv); 440 ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, &priv);
44 *pobject = nv_object(priv); 441 *pobject = nv_object(priv);
45 if (ret) 442 if (ret)
46 return ret; 443 return ret;
47 444
48 priv->base.pll_calc = nva3_clock_pll_calc; 445 priv->base.read = nvc0_clock_read;
446 priv->base.calc = nvc0_clock_calc;
447 priv->base.prog = nvc0_clock_prog;
448 priv->base.tidy = nvc0_clock_tidy;
49 return 0; 449 return 0;
50} 450}
51 451
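
nvc0_clock_prog() above runs as a pipeline: each stage (divider programming,
divider-mode select, PLL programming, PLL-mode select, final divider) is
applied across all engine slots before the next stage begins, presumably so
every clock sits on its safe divider source while the PLLs retrain. A toy
version of that stage-major loop:

    #include <stdio.h>

    #define NR_ENG 4

    static void stage_div(int eng)   { printf("eng%d: program divider\n", eng); }
    static void stage_pll(int eng)   { printf("eng%d: program pll\n", eng); }
    static void stage_final(int eng) { printf("eng%d: final divider\n", eng); }

    int main(void)
    {
        void (*stage[])(int) = { stage_div, stage_pll, stage_final };
        unsigned int active = 0x5; /* engines 0 and 2 have a new frequency */
        unsigned int i;
        int j;

        for (i = 0; i < sizeof(stage) / sizeof(stage[0]); i++)
            for (j = 0; j < NR_ENG; j++)
                if (active & (1u << j))
                    stage[i](j);
        return 0;
    }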
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
new file mode 100644
index 000000000000..4c62e84b96f5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -0,0 +1,497 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/clock.h>
26#include <subdev/timer.h>
27#include <subdev/bios.h>
28#include <subdev/bios/pll.h>
29
30#include "pll.h"
31
32struct nve0_clock_info {
33 u32 freq;
34 u32 ssel;
35 u32 mdiv;
36 u32 dsrc;
37 u32 ddiv;
38 u32 coef;
39};
40
41struct nve0_clock_priv {
42 struct nouveau_clock base;
43 struct nve0_clock_info eng[16];
44};
45
46static u32 read_div(struct nve0_clock_priv *, int, u32, u32);
47static u32 read_pll(struct nve0_clock_priv *, u32);
48
49static u32
50read_vco(struct nve0_clock_priv *priv, u32 dsrc)
51{
52 u32 ssrc = nv_rd32(priv, dsrc);
53 if (!(ssrc & 0x00000100))
54 return read_pll(priv, 0x00e800);
55 return read_pll(priv, 0x00e820);
56}
57
58static u32
59read_pll(struct nve0_clock_priv *priv, u32 pll)
60{
61 u32 ctrl = nv_rd32(priv, pll + 0x00);
62 u32 coef = nv_rd32(priv, pll + 0x04);
63 u32 P = (coef & 0x003f0000) >> 16;
64 u32 N = (coef & 0x0000ff00) >> 8;
65 u32 M = (coef & 0x000000ff) >> 0;
66 u32 sclk;
67 u16 fN = 0xf000;
68
69 if (!(ctrl & 0x00000001))
70 return 0;
71
72 switch (pll) {
73 case 0x00e800:
74 case 0x00e820:
75 sclk = nv_device(priv)->crystal;
76 P = 1;
77 break;
78 case 0x132000:
79 sclk = read_pll(priv, 0x132020);
80 P = (coef & 0x10000000) ? 2 : 1;
81 break;
82 case 0x132020:
83 sclk = read_div(priv, 0, 0x137320, 0x137330);
84 fN = nv_rd32(priv, pll + 0x10) >> 16;
85 break;
86 case 0x137000:
87 case 0x137020:
88 case 0x137040:
89 case 0x1370e0:
90 sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
91 break;
92 default:
93 return 0;
94 }
95
96 if (P == 0)
97 P = 1;
98
99 sclk = (sclk * N) + (((u16)(fN + 4096) * sclk) >> 13);
100 return sclk / (M * P);
101}
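
New on nve0 relative to nvc0 is the fractional-N term in read_pll(): the
effective multiplier becomes N + (fN + 4096)/8192, and the fN default of
0xf000 is chosen so that (u16)(0xf000 + 4096) wraps to zero, making the term
vanish for PLLs without a fraction register. A standalone check of the
arithmetic, with made-up coefficients:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t pll_freq(uint32_t ref, uint32_t N, uint16_t fN,
                             uint32_t M, uint32_t P)
    {
        uint64_t clk = (uint64_t)ref * N +
                       (((uint16_t)(fN + 4096) * (uint64_t)ref) >> 13);

        return clk / (M * P);
    }

    int main(void)
    {
        /* 27000 kHz ref, N=37, fN=2048 -> N + 6144/8192 = 37.75 */
        printf("%u kHz\n", (unsigned)pll_freq(27000, 37, 2048, 1, 1));
        /* prints 1019250 (= 27000 * 37.75) */
        return 0;
    }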
102
103static u32
104read_div(struct nve0_clock_priv *priv, int doff, u32 dsrc, u32 dctl)
105{
106 u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
107 u32 sctl = nv_rd32(priv, dctl + (doff * 4));
108
109 switch (ssrc & 0x00000003) {
110 case 0:
111 if ((ssrc & 0x00030000) != 0x00030000)
112 return nv_device(priv)->crystal;
113 return 108000;
114 case 2:
115 return 100000;
116 case 3:
117 if (sctl & 0x80000000) {
118 u32 sclk = read_vco(priv, dsrc + (doff * 4));
119 u32 sdiv = (sctl & 0x0000003f) + 2;
120 return (sclk * 2) / sdiv;
121 }
122
123 return read_vco(priv, dsrc + (doff * 4));
124 default:
125 return 0;
126 }
127}
128
129static u32
130read_mem(struct nve0_clock_priv *priv)
131{
132 switch (nv_rd32(priv, 0x1373f4) & 0x0000000f) {
133 case 1: return read_pll(priv, 0x132020);
134 case 2: return read_pll(priv, 0x132000);
135 default:
136 return 0;
137 }
138}
139
140static u32
141read_clk(struct nve0_clock_priv *priv, int clk)
142{
143 u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
144 u32 sclk, sdiv;
145
146 if (clk < 7) {
147 u32 ssel = nv_rd32(priv, 0x137100);
148 if (ssel & (1 << clk)) {
149 sclk = read_pll(priv, 0x137000 + (clk * 0x20));
150 sdiv = 1;
151 } else {
152 sclk = read_div(priv, clk, 0x137160, 0x1371d0);
153 sdiv = 0;
154 }
155 } else {
156 u32 ssrc = nv_rd32(priv, 0x137160 + (clk * 0x04));
157 if ((ssrc & 0x00000003) == 0x00000003) {
158 sclk = read_div(priv, clk, 0x137160, 0x1371d0);
159 if (ssrc & 0x00000100) {
160 if (ssrc & 0x40000000)
161 sclk = read_pll(priv, 0x1370e0);
162 sdiv = 1;
163 } else {
164 sdiv = 0;
165 }
166 } else {
167 sclk = read_div(priv, clk, 0x137160, 0x1371d0);
168 sdiv = 0;
169 }
170 }
171
172 if (sctl & 0x80000000) {
173 if (sdiv)
174 sdiv = ((sctl & 0x00003f00) >> 8) + 2;
175 else
176 sdiv = ((sctl & 0x0000003f) >> 0) + 2;
177 return (sclk * 2) / sdiv;
178 }
179
180 return sclk;
181}
182
183static int
184nve0_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
185{
186 struct nouveau_device *device = nv_device(clk);
187 struct nve0_clock_priv *priv = (void *)clk;
188
189 switch (src) {
190 case nv_clk_src_crystal:
191 return device->crystal;
192 case nv_clk_src_href:
193 return 100000;
194 case nv_clk_src_mem:
195 return read_mem(priv);
196 case nv_clk_src_gpc:
197 return read_clk(priv, 0x00);
198 case nv_clk_src_rop:
199 return read_clk(priv, 0x01);
200 case nv_clk_src_hubk07:
201 return read_clk(priv, 0x02);
202 case nv_clk_src_hubk06:
203 return read_clk(priv, 0x07);
204 case nv_clk_src_hubk01:
205 return read_clk(priv, 0x08);
206 case nv_clk_src_daemon:
207 return read_clk(priv, 0x0c);
208 case nv_clk_src_vdec:
209 return read_clk(priv, 0x0e);
210 default:
211 nv_error(clk, "invalid clock source %d\n", src);
212 return -EINVAL;
213 }
214}
215
216static u32
217calc_div(struct nve0_clock_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
218{
219 u32 div = min((ref * 2) / freq, (u32)65);
220 if (div < 2)
221 div = 2;
222
223 *ddiv = div - 2;
224 return (ref * 2) / div;
225}
226
227static u32
228calc_src(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
229{
230 u32 sclk;
231
232 /* use one of the fixed frequencies if possible */
233 *ddiv = 0x00000000;
234 switch (freq) {
235 case 27000:
236 case 108000:
237 *dsrc = 0x00000000;
238 if (freq == 108000)
239 *dsrc |= 0x00030000;
240 return freq;
241 case 100000:
242 *dsrc = 0x00000002;
243 return freq;
244 default:
245 *dsrc = 0x00000003;
246 break;
247 }
248
249 /* otherwise, calculate the closest divider */
250 sclk = read_vco(priv, 0x137160 + (clk * 4));
251 if (clk < 7)
252 sclk = calc_div(priv, clk, sclk, freq, ddiv);
253 return sclk;
254}
255
256static u32
257calc_pll(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *coef)
258{
259 struct nouveau_bios *bios = nouveau_bios(priv);
260 struct nvbios_pll limits;
261 int N, M, P, ret;
262
263 ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
264 if (ret)
265 return 0;
266
267 limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
268 if (!limits.refclk)
269 return 0;
270
271 ret = nva3_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
272 if (ret <= 0)
273 return 0;
274
275 *coef = (P << 16) | (N << 8) | M;
276 return ret;
277}
278
279static int
280calc_clk(struct nve0_clock_priv *priv,
281 struct nouveau_cstate *cstate, int clk, int dom)
282{
283 struct nve0_clock_info *info = &priv->eng[clk];
284 u32 freq = cstate->domain[dom];
285 u32 src0, div0, div1D, div1P = 0;
286 u32 clk0, clk1 = 0;
287
288 /* invalid clock domain */
289 if (!freq)
290 return 0;
291
292 /* first possible path, using only dividers */
293 clk0 = calc_src(priv, clk, freq, &src0, &div0);
294 clk0 = calc_div(priv, clk, clk0, freq, &div1D);
295
296 /* see if we can get any closer using PLLs */
297 if (clk0 != freq && (0x0000ff87 & (1 << clk))) {
298 if (clk <= 7)
299 clk1 = calc_pll(priv, clk, freq, &info->coef);
300 else
301 clk1 = cstate->domain[nv_clk_src_hubk06];
302 clk1 = calc_div(priv, clk, clk1, freq, &div1P);
303 }
304
305 /* select the method which gets closest to target freq */
306 if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
307 info->dsrc = src0;
308 if (div0) {
309 info->ddiv |= 0x80000000;
310 info->ddiv |= div0 << 8;
311 info->ddiv |= div0;
312 }
313 if (div1D) {
314 info->mdiv |= 0x80000000;
315 info->mdiv |= div1D;
316 }
317 info->ssel = 0;
318 info->freq = clk0;
319 } else {
320 if (div1P) {
321 info->mdiv |= 0x80000000;
322 info->mdiv |= div1P << 8;
323 }
324 info->ssel = (1 << clk);
325 info->dsrc = 0x40000100;
326 info->freq = clk1;
327 }
328
329 return 0;
330}
331
332static int
333nve0_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
334{
335 struct nve0_clock_priv *priv = (void *)clk;
336 int ret;
337
338 if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
339 (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
340 (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
341 (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
342 (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
343 (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
344 (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
345 return ret;
346
347 return 0;
348}
349
350static void
351nve0_clock_prog_0(struct nve0_clock_priv *priv, int clk)
352{
353 struct nve0_clock_info *info = &priv->eng[clk];
354 if (!info->ssel) {
355 nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
356 nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
357 }
358}
359
360static void
361nve0_clock_prog_1_0(struct nve0_clock_priv *priv, int clk)
362{
363 nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
364 nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
365}
366
367static void
368nve0_clock_prog_1_1(struct nve0_clock_priv *priv, int clk)
369{
370 nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000000);
371}
372
373static void
374nve0_clock_prog_2(struct nve0_clock_priv *priv, int clk)
375{
376 struct nve0_clock_info *info = &priv->eng[clk];
377 const u32 addr = 0x137000 + (clk * 0x20);
378 nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
379 nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
380 if (info->coef) {
381 nv_wr32(priv, addr + 0x04, info->coef);
382 nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
383 nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
384 nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
385 }
386}
387
388static void
389nve0_clock_prog_3(struct nve0_clock_priv *priv, int clk)
390{
391 struct nve0_clock_info *info = &priv->eng[clk];
392 nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
393}
394
395static void
396nve0_clock_prog_4_0(struct nve0_clock_priv *priv, int clk)
397{
398 struct nve0_clock_info *info = &priv->eng[clk];
399 if (info->ssel) {
400 nv_mask(priv, 0x137100, (1 << clk), info->ssel);
401 nv_wait(priv, 0x137100, (1 << clk), info->ssel);
402 }
403}
404
405static void
406nve0_clock_prog_4_1(struct nve0_clock_priv *priv, int clk)
407{
408 struct nve0_clock_info *info = &priv->eng[clk];
409 if (info->ssel) {
410 nv_mask(priv, 0x137160 + (clk * 0x04), 0x40000000, 0x40000000);
411 nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000100);
412 }
413}
414
415static int
416nve0_clock_prog(struct nouveau_clock *clk)
417{
418 struct nve0_clock_priv *priv = (void *)clk;
419 struct {
420 u32 mask;
421 void (*exec)(struct nve0_clock_priv *, int);
422 } stage[] = {
423 { 0x007f, nve0_clock_prog_0 }, /* div programming */
424 { 0x007f, nve0_clock_prog_1_0 }, /* select div mode */
425 { 0xff80, nve0_clock_prog_1_1 },
426 { 0x00ff, nve0_clock_prog_2 }, /* (maybe) program pll */
427 { 0xff80, nve0_clock_prog_3 }, /* final divider */
428 { 0x007f, nve0_clock_prog_4_0 }, /* (maybe) select pll mode */
429 { 0xff80, nve0_clock_prog_4_1 },
430 };
431 int i, j;
432
433 for (i = 0; i < ARRAY_SIZE(stage); i++) {
434 for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
435 if (!(stage[i].mask & (1 << j)))
436 continue;
437 if (!priv->eng[j].freq)
438 continue;
439 stage[i].exec(priv, j);
440 }
441 }
442
443 return 0;
444}
445
446static void
447nve0_clock_tidy(struct nouveau_clock *clk)
448{
449 struct nve0_clock_priv *priv = (void *)clk;
450 memset(priv->eng, 0x00, sizeof(priv->eng));
451}
452
453static struct nouveau_clocks
454nve0_domain[] = {
455 { nv_clk_src_crystal, 0xff },
456 { nv_clk_src_href , 0xff },
457 { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
458 { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
459 { nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
460 { nv_clk_src_mem , 0x03, 0, "memory", 1000 },
461 { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
462 { nv_clk_src_hubk01 , 0x05 },
463 { nv_clk_src_vdec , 0x06 },
464 { nv_clk_src_daemon , 0x07 },
465 { nv_clk_src_max }
466};
467
468static int
469nve0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
470 struct nouveau_oclass *oclass, void *data, u32 size,
471 struct nouveau_object **pobject)
472{
473 struct nve0_clock_priv *priv;
474 int ret;
475
476 ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, &priv);
477 *pobject = nv_object(priv);
478 if (ret)
479 return ret;
480
481 priv->base.read = nve0_clock_read;
482 priv->base.calc = nve0_clock_calc;
483 priv->base.prog = nve0_clock_prog;
484 priv->base.tidy = nve0_clock_tidy;
485 return 0;
486}
487
488struct nouveau_oclass
489nve0_clock_oclass = {
490 .handle = NV_SUBDEV(CLOCK, 0xe0),
491 .ofuncs = &(struct nouveau_ofuncs) {
492 .ctor = nve0_clock_ctor,
493 .dtor = _nouveau_clock_dtor,
494 .init = _nouveau_clock_init,
495 .fini = _nouveau_clock_fini,
496 },
497};
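Two things in this new file reward a closer read. calc_clk() computes both a divider-only path and a PLL path and keeps whichever lands nearer the requested frequency, and nve0_clock_prog() then applies the result in fixed stages (divider setup, mode select, PLL program, final divider) across all engines before moving to the next stage. The readback in read_pll() is the formula freq = ref * (N + frac) / (M * P), with an optional fractional-N term. A standalone model of just that arithmetic (function and values are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Mirror of read_pll()'s closing arithmetic.  frac comes from the u16
 * register field fN: (u16)(fN + 4096) / 8192.  With the default fN of
 * 0xf000, (fN + 4096) wraps to 0, so there is no fractional part unless
 * the hardware actually supplies one. */
static uint32_t pll_freq(uint32_t ref, uint32_t N, uint32_t M, uint32_t P,
			 uint16_t fN)
{
	uint32_t sclk = (ref * N) + (((uint16_t)(fN + 4096) * ref) >> 13);
	if (P == 0)
		P = 1;
	return sclk / (M * P);
}

int main(void)
{
	/* e.g. a 27000kHz crystal with N=100, M=2, P=1 -> 1350000kHz */
	printf("%u kHz\n", pll_freq(27000, 100, 2, 1, 0xf000));
	return 0;
}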
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
index cf1ed0dc9bc9..b47d543ab2e3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
@@ -38,7 +38,7 @@ getMNP_single(struct nouveau_subdev *subdev, struct nvbios_pll *info, int clk,
  * "clk" parameter in kHz
  * returns calculated clock
  */
-	int cv = nouveau_bios(subdev)->version.chip;
+	struct nouveau_bios *bios = nouveau_bios(subdev);
 	int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
 	int minM = info->vco1.min_m, maxM = info->vco1.max_m;
 	int minN = info->vco1.min_n, maxN = info->vco1.max_n;
@@ -54,18 +54,21 @@ getMNP_single(struct nouveau_subdev *subdev, struct nvbios_pll *info, int clk,
 
 	/* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
 	/* possibly correlated with introduction of 27MHz crystal */
-	if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
-		if (clk > 250000)
-			maxM = 6;
-		if (clk > 340000)
-			maxM = 2;
-	} else if (cv < 0x40) {
-		if (clk > 150000)
-			maxM = 6;
-		if (clk > 200000)
-			maxM = 4;
-		if (clk > 340000)
-			maxM = 2;
+	if (bios->version.major < 0x60) {
+		int cv = bios->version.chip;
+		if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
+			if (clk > 250000)
+				maxM = 6;
+			if (clk > 340000)
+				maxM = 2;
+		} else if (cv < 0x40) {
+			if (clk > 150000)
+				maxM = 6;
+			if (clk > 200000)
+				maxM = 4;
+			if (clk > 340000)
+				maxM = 2;
+		}
 	}
 
 	P = 1 << maxP;
@@ -227,10 +230,12 @@ nv04_pll_calc(struct nouveau_subdev *subdev, struct nvbios_pll *info, u32 freq,
 {
 	int ret;
 
-	if (!info->vco2.max_freq) {
+	if (!info->vco2.max_freq || !N2) {
 		ret = getMNP_single(subdev, info, freq, N1, M1, P);
-		*N2 = 1;
-		*M2 = 1;
+		if (N2) {
+			*N2 = 1;
+			*M2 = 1;
+		}
 	} else {
 		ret = getMNP_double(subdev, info, freq, N1, M1, N2, M2, P);
 	}
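Two separate behaviour changes are folded into this file. First, the legacy maxM clamping keyed off the chip version is now only applied when the BIOS major version is below 0x60. Second, nv04_pll_calc() no longer writes through N2/M2 unconditionally, so callers interested only in a single-stage result may pass NULL. A minimal sketch of that optional out-parameter convention (names hypothetical, not driver code):

/* Only the first VCO's coefficients are mandatory outputs. */
static int pll_calc(int *N1, int *M1, int *N2, int *M2)
{
	*N1 = 100;
	*M1 = 2;
	if (N2) {		/* second stage requested? */
		*N2 = 1;
		*M2 = 1;
	}
	return 0;
}

/* callers: pll_calc(&N1, &M1, &N2, &M2) or pll_calc(&N1, &M1, NULL, NULL) */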
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
index 2fe1f712eefa..8eca457c2814 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
@@ -45,6 +45,7 @@ nva3_pll_calc(struct nouveau_subdev *subdev, struct nvbios_pll *info,
 	lM = max(lM, (int)info->vco1.min_m);
 	hM = (info->refclk + info->vco1.min_inputfreq) / info->vco1.min_inputfreq;
 	hM = min(hM, (int)info->vco1.max_m);
+	lM = min(lM, hM);
 
 	for (M = lM; M <= hM; M++) {
 		u32 tmp = freq * *P * M;
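The single added line protects the M loop: lM and hM are derived from different BIOS limits, and nothing prevents a table where lM ends up greater than hM, in which case the loop would execute zero times and no coefficients would ever be found. Clamping lM to hM guarantees at least one candidate divider is tried. In miniature (hypothetical limits and helper):

	int lM = 4, hM = 2;	/* inconsistent BIOS PLL limits */
	lM = min(lM, hM);	/* now 2: the loop below runs once */
	for (int M = lM; M <= hM; M++)
		try_coefficients(M);	/* hypothetical helper */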
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h b/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h
new file mode 100644
index 000000000000..fb33f06ebd59
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_CLK_SEQ_H__
+#define __NVKM_CLK_SEQ_H__
+
+#include <subdev/bus.h>
+#include <subdev/bus/hwsq.h>
+
+#define clk_init(s,p)     hwsq_init(&(s)->base, (p))
+#define clk_exec(s,e)     hwsq_exec(&(s)->base, (e))
+#define clk_have(s,r)     ((s)->r_##r.addr != 0x000000)
+#define clk_rd32(s,r)     hwsq_rd32(&(s)->base, &(s)->r_##r)
+#define clk_wr32(s,r,d)   hwsq_wr32(&(s)->base, &(s)->r_##r, (d))
+#define clk_mask(s,r,m,d) hwsq_mask(&(s)->base, &(s)->r_##r, (m), (d))
+#define clk_setf(s,f,d)   hwsq_setf(&(s)->base, (f), (d))
+#define clk_wait(s,f,d)   hwsq_wait(&(s)->base, (f), (d))
+#define clk_nsec(s,n)     hwsq_nsec(&(s)->base, (n))
+
+#endif
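These clk_* wrappers mirror the existing ram_* hwsq wrappers: instead of poking registers directly, a clock driver records reads, writes, masks and delays into a bus/hwsq (hardware sequencer) script and executes the whole script at once. A user embeds the context the r_##r token-pasting expects; this sketch is illustrative, not a real driver:

struct myclk_hwsq {
	struct hwsq base;		/* what the macros reach via (s)->base */
	struct hwsq_reg r_0x004048;	/* clk_rd32/clk_wr32(s, 0x004048, ...) */
};					/* paste "r_" + the register token */

/* Recording and firing a sequence might look like (error paths elided):
 *
 *	struct myclk_hwsq hwsq;
 *	clk_init(&hwsq, nv_subdev(priv));      -- begin recording
 *	clk_mask(&hwsq, 0x004048, 0x00000101, 0x00000101);
 *	clk_nsec(&hwsq, 2000);                 -- scripted 2000ns delay
 *	return clk_exec(&hwsq, true);          -- run it on the hardware
 */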
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
index b22357d9b821..27c8235f1a85 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -168,7 +168,8 @@ setPLL_single(struct nouveau_devinit *devinit, u32 reg,
 	/* downclock -- write new NM first */
 	nv_wr32(devinit, reg, (oldpll & 0xffff0000) | pv->NM1);
 
-	if (chip_version < 0x17 && chip_version != 0x11)
+	if ((chip_version < 0x17 || chip_version == 0x1a) &&
+	    chip_version != 0x11)
 		/* wait a bit on older chips */
 		msleep(64);
 	nv_rd32(devinit, reg);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
index 463b08fa0968..8d274dba1ef1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -38,12 +38,18 @@ static void
 nv10_devinit_meminit(struct nouveau_devinit *devinit)
 {
 	struct nv10_devinit_priv *priv = (void *)devinit;
-	const int mem_width[] = { 0x10, 0x00, 0x20 };
-	const int mem_width_count = nv_device(priv)->chipset >= 0x17 ? 3 : 2;
+	static const int mem_width[] = { 0x10, 0x00, 0x20 };
+	int mem_width_count;
 	uint32_t patt = 0xdeadbeef;
 	struct io_mapping *fb;
 	int i, j, k;
 
+	if (nv_device(priv)->card_type >= NV_11 &&
+	    nv_device(priv)->chipset >= 0x17)
+		mem_width_count = 3;
+	else
+		mem_width_count = 2;
+
 	/* Map the framebuffer aperture */
 	fb = fbmem_init(nv_device(priv)->pdev);
 	if (!fb) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index 821cd75b86a3..f009d8a39d9d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -22,9 +22,10 @@
  * Authors: Ben Skeggs
  */
 
-#include "subdev/fb.h"
-#include "subdev/bios.h"
-#include "subdev/bios/bit.h"
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+
+#include "priv.h"
 
 int
 nouveau_fb_bios_memtype(struct nouveau_bios *bios)
@@ -106,9 +107,9 @@ _nouveau_fb_dtor(struct nouveau_object *object)
 
 int
 nouveau_fb_create_(struct nouveau_object *parent, struct nouveau_object *engine,
-		   struct nouveau_oclass *oclass, struct nouveau_oclass *ramcls,
-		   int length, void **pobject)
+		   struct nouveau_oclass *oclass, int length, void **pobject)
 {
+	struct nouveau_fb_impl *impl = (void *)oclass;
 	static const char *name[] = {
 		[NV_MEM_TYPE_UNKNOWN] = "unknown",
 		[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
@@ -132,8 +133,10 @@ nouveau_fb_create_(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
+	pfb->memtype_valid = impl->memtype;
+
 	ret = nouveau_object_ctor(nv_object(pfb), nv_object(pfb),
-				  ramcls, NULL, 0, &ram);
+				  impl->ram, NULL, 0, &ram);
 	if (ret) {
 		nv_fatal(pfb, "error detecting memory configuration!!\n");
 		return ret;
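The shape of this refactor: per-chipset data (the memtype validator and the RAM class) moves out of each chip's constructor and into an extended oclass, struct nouveau_fb_impl, which the shared constructor downcasts to. Reduced to a standalone sketch (types hypothetical, pattern as in the code above):

#include <stdio.h>

struct oclass { int handle; };			/* generic class header */

struct fb_impl {				/* "subclass" of oclass */
	struct oclass base;			/* must be first member */
	const char *ram;			/* per-chipset payload */
};

static void fb_create(struct oclass *oclass)
{
	/* safe because every fb oclass is really a struct fb_impl */
	struct fb_impl *impl = (struct fb_impl *)oclass;
	printf("handle %#x uses %s ram\n", oclass->handle, impl->ram);
}

int main(void)
{
	static struct fb_impl nv10 = { .base.handle = 0x10, .ram = "nv10" };
	fb_create(&nv10.base);
	return 0;
}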
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
new file mode 100644
index 000000000000..34f9605ffee6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include "priv.h"
+
+int
+nouveau_gddr5_calc(struct nouveau_ram *ram)
+{
+	struct nouveau_bios *bios = nouveau_bios(ram);
+	int pd, lf, xd, vh, vr, vo;
+	int WL, CL, WR, at, dt, ds;
+	int rq = ram->freq < 1000000; /* XXX */
+
+	switch (!!ram->ramcfg.data * ram->ramcfg.version) {
+	case 0x11:
+		pd =  (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x80) >> 7;
+		lf =  (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x40) >> 6;
+		xd = !(nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x20);
+		vh =  (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x10) >> 4;
+		vr =  (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x04) >> 2;
+		vo =   nv_ro08(bios, ram->ramcfg.data + 0x06) & 0xff;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	switch (!!ram->timing.data * ram->timing.version) {
+	case 0x20:
+		WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7;
+		CL =  nv_ro08(bios, ram->timing.data + 0x04) & 0x1f;
+		WR =  nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f;
+		at = (nv_ro08(bios, ram->timing.data + 0x2e) & 0xc0) >> 6;
+		dt =  nv_ro08(bios, ram->timing.data + 0x2e) & 0x03;
+		ds =  nv_ro08(bios, ram->timing.data + 0x2f) & 0x03;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	if (WL < 1 || WL > 7 || CL < 5 || CL > 36 || WR < 4 || WR > 35)
+		return -EINVAL;
+	CL -= 5;
+	WR -= 4;
+
+	ram->mr[0] &= ~0xf7f;
+	ram->mr[0] |= (WR & 0x0f) << 8;
+	ram->mr[0] |= (CL & 0x0f) << 3;
+	ram->mr[0] |= (WL & 0x07) << 0;
+
+	ram->mr[1] &= ~0x0bf;
+	ram->mr[1] |= (xd & 0x01) << 7;
+	ram->mr[1] |= (at & 0x03) << 4;
+	ram->mr[1] |= (dt & 0x03) << 2;
+	ram->mr[1] |= (ds & 0x03) << 0;
+
+	ram->mr[3] &= ~0x020;
+	ram->mr[3] |= (rq & 0x01) << 5;
+
+	if (!vo)
+		vo = (ram->mr[6] & 0xff0) >> 4;
+	if (ram->mr[6] & 0x001)
+		pd = 1; /* binary driver does this.. bug? */
+	ram->mr[6] &= ~0xff1;
+	ram->mr[6] |= (vo & 0xff) << 4;
+	ram->mr[6] |= (pd & 0x01) << 0;
+
+	if (!(ram->mr[7] & 0x100))
+		vr = 0; /* binary driver does this.. bug? */
+	ram->mr[7] &= ~0x188;
+	ram->mr[7] |= (vr & 0x01) << 8;
+	ram->mr[7] |= (vh & 0x01) << 7;
+	ram->mr[7] |= (lf & 0x01) << 3;
+	return 0;
+}
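nouveau_gddr5_calc() converts BIOS ramcfg/timing values into GDDR5 mode-register (MR) bitfields; note the encodings are biased (CL is stored as CL - 5, WR as WR - 4) before being masked into MR0. A standalone model of just the MR0 packing step (field layout as in the code above; input values hypothetical):

#include <stdint.h>
#include <stdio.h>

static int pack_mr0(int WL, int CL, int WR, uint32_t *mr0)
{
	if (WL < 1 || WL > 7 || CL < 5 || CL > 36 || WR < 4 || WR > 35)
		return -1;		/* outside the encodable range */
	CL -= 5;			/* MR0 stores CL with a -5 bias */
	WR -= 4;			/* ...and WR with a -4 bias */
	*mr0 &= ~(uint32_t)0xf7f;	/* clear WR[11:8], CL[6:3], WL[2:0] */
	*mr0 |= (WR & 0x0f) << 8;
	*mr0 |= (CL & 0x0f) << 3;
	*mr0 |= (WL & 0x07) << 0;
	return 0;
}

int main(void)
{
	uint32_t mr0 = 0;
	if (pack_mr0(5, 12, 14, &mr0) == 0)	/* WL=5, CL=12, WR=14 */
		printf("MR0 = 0x%03x\n", mr0);	/* -> 0xa3d */
	return 0;
}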
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
index 1f103c7b89fa..8309fe33fe84 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -22,14 +22,10 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv04.h"
 
 #define NV04_PFB_CFG0 0x00100200
 
-struct nv04_fb_priv {
-	struct nouveau_fb base;
-};
-
 bool
 nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 {
@@ -57,30 +53,37 @@ nv04_fb_init(struct nouveau_object *object)
 	return 0;
 }
 
-static int
+int
 nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
 {
+	struct nv04_fb_impl *impl = (void *)oclass;
 	struct nv04_fb_priv *priv;
 	int ret;
 
-	ret = nouveau_fb_create(parent, engine, oclass, &nv04_ram_oclass, &priv);
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.tile.regions = impl->tile.regions;
+	priv->base.tile.init = impl->tile.init;
+	priv->base.tile.comp = impl->tile.comp;
+	priv->base.tile.fini = impl->tile.fini;
+	priv->base.tile.prog = impl->tile.prog;
 	return 0;
 }
 
-struct nouveau_oclass
-nv04_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x04),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x04),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv04_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv04_ram_oclass,
+}.base.base;
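Note the new definition style that every nvXX_fb_oclass in this series now uses: the exported symbol is a struct nouveau_oclass *, initialised to the address of the base member buried inside a file-scope C99 compound literal of the larger impl structure. Callers keep seeing a plain oclass pointer; the constructor casts back to reach the extra fields. The idiom, isolated (hypothetical types):

struct base { int handle; };
struct impl { struct base base; int regions; };

/* A file-scope compound literal has static storage duration, and the
 * address of its member is a valid constant initialiser, so this builds
 * a static impl object but exports only a pointer to its base. */
struct base *my_oclass = &(struct impl) {
	.base.handle = 0x04,
	.regions = 8,
}.base;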
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h
new file mode 100644
index 000000000000..06ce71f87a74
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h
@@ -0,0 +1,55 @@
+#ifndef __NVKM_FB_NV04_H__
+#define __NVKM_FB_NV04_H__
+
+#include "priv.h"
+
+struct nv04_fb_priv {
+	struct nouveau_fb base;
+};
+
+int  nv04_fb_ctor(struct nouveau_object *, struct nouveau_object *,
+		  struct nouveau_oclass *, void *, u32,
+		  struct nouveau_object **);
+
+struct nv04_fb_impl {
+	struct nouveau_fb_impl base;
+	struct {
+		int regions;
+		void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
+			     u32 pitch, u32 flags, struct nouveau_fb_tile *);
+		void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
+			     struct nouveau_fb_tile *);
+		void (*fini)(struct nouveau_fb *, int i,
+			     struct nouveau_fb_tile *);
+		void (*prog)(struct nouveau_fb *, int i,
+			     struct nouveau_fb_tile *);
+	} tile;
+};
+
+void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv30_fb_init(struct nouveau_object *);
+void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+
+void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
+		       struct nouveau_fb_tile *);
+
+int  nv41_fb_init(struct nouveau_object *);
+void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv44_fb_init(struct nouveau_object *);
+void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
index be069b5306b6..ffb7ec6d97aa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv10_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -57,34 +53,19 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 	nv_rd32(pfb, 0x100240 + (i * 0x10));
 }
 
-static int
-nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv10_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv10_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv10_fb_tile_init;
-	priv->base.tile.fini = nv10_fb_tile_fini;
-	priv->base.tile.prog = nv10_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv10_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x10),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv10_fb_ctor,
+struct nouveau_oclass *
+nv10_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x10),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = _nouveau_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv10_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv10_fb_tile_init,
+	.tile.fini = nv10_fb_tile_fini,
+	.tile.prog = nv10_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
index 57a2af0079b3..9159a5ccee93 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -24,40 +24,21 @@
  *
  */
 
-#include "priv.h"
+#include "nv04.h"
 
-struct nv1a_fb_priv {
-	struct nouveau_fb base;
-};
-
-static int
-nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv1a_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv1a_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv10_fb_tile_init;
-	priv->base.tile.fini = nv10_fb_tile_fini;
-	priv->base.tile.prog = nv10_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv1a_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x1a),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv1a_fb_ctor,
+struct nouveau_oclass *
+nv1a_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x1a),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = _nouveau_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv10_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv10_fb_tile_init,
+	.tile.fini = nv10_fb_tile_fini,
+	.tile.prog = nv10_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
index b18c4e63bb47..f003c1b1893f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv20_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -80,35 +76,20 @@ nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 	nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
 }
 
-static int
-nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv20_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv20_fb_tile_init;
-	priv->base.tile.comp = nv20_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv20_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x20),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv20_fb_ctor,
+struct nouveau_oclass *
+nv20_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x20),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = _nouveau_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv20_fb_tile_init,
+	.tile.comp = nv20_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
index 32ccabf10c45..f34f4223210b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv25_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 static void
 nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -46,35 +42,20 @@ nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
 	}
 }
 
-static int
-nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv25_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv20_fb_tile_init;
-	priv->base.tile.comp = nv25_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv25_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x25),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv25_fb_ctor,
+struct nouveau_oclass *
+nv25_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x25),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = _nouveau_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv20_fb_tile_init,
+	.tile.comp = nv25_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
index bef756d43d33..69093f7151f0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv30_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -67,7 +63,7 @@ nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
 }
 
 static int
-calc_bias(struct nv30_fb_priv *priv, int k, int i, int j)
+calc_bias(struct nv04_fb_priv *priv, int k, int i, int j)
 {
 	struct nouveau_device *device = nv_device(priv);
 	int b = (device->chipset > 0x30 ?
@@ -78,7 +74,7 @@ calc_bias(struct nv30_fb_priv *priv, int k, int i, int j)
 }
 
 static int
-calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
+calc_ref(struct nv04_fb_priv *priv, int l, int k, int i)
 {
 	int j, x = 0;
 
@@ -95,7 +91,7 @@ int
 nv30_fb_init(struct nouveau_object *object)
 {
 	struct nouveau_device *device = nv_device(object);
-	struct nv30_fb_priv *priv = (void *)object;
+	struct nv04_fb_priv *priv = (void *)object;
 	int ret, i, j;
 
 	ret = nouveau_fb_init(&priv->base);
@@ -124,35 +120,20 @@ nv30_fb_init(struct nouveau_object *object)
 	return 0;
 }
 
-static int
-nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv30_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv30_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv30_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x30),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv30_fb_ctor,
+struct nouveau_oclass *
+nv30_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x30),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv30_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv30_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
index 097d8e3824f2..161b06e8fc3f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv35_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 static void
 nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -47,35 +43,20 @@ nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
 	}
 }
 
-static int
-nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv35_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv35_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv35_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x35),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv35_fb_ctor,
+struct nouveau_oclass *
+nv35_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x35),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv30_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv35_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
index 9d6d9df896d9..2dd3d0aab6bb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv36_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 static void
 nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -47,35 +43,20 @@ nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
 	}
 }
 
-static int
-nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv36_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv36_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv36_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x36),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv36_fb_ctor,
+struct nouveau_oclass *
+nv36_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x36),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv30_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv36_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
index 33b4393a7829..95a115ab0c86 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv40_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -50,7 +46,7 @@ nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
 static int
 nv40_fb_init(struct nouveau_object *object)
 {
-	struct nv40_fb_priv *priv = (void *)object;
+	struct nv04_fb_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_fb_init(&priv->base);
@@ -61,36 +57,20 @@ nv40_fb_init(struct nouveau_object *object)
 	return 0;
 }
 
-static int
-nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv40_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv40_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv40_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv40_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x40),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv40_fb_ctor,
+struct nouveau_oclass *
+nv40_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x40),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv40_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv40_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv40_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h
new file mode 100644
index 000000000000..581f808527f2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h
@@ -0,0 +1,17 @@
1#ifndef __NVKM_FB_NV40_H__
2#define __NVKM_FB_NV40_H__
3
4#include "priv.h"
5
6struct nv40_ram {
7 struct nouveau_ram base;
8 u32 ctrl;
9 u32 coef;
10};
11
12
13int nv40_ram_calc(struct nouveau_fb *, u32);
14int nv40_ram_prog(struct nouveau_fb *);
15void nv40_ram_tidy(struct nouveau_fb *);
16
17#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
index 02cd83789cd4..b239a8615599 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv41_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
@@ -43,7 +39,7 @@ nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 int
 nv41_fb_init(struct nouveau_object *object)
 {
-	struct nv41_fb_priv *priv = (void *)object;
+	struct nv04_fb_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_fb_init(&priv->base);
@@ -54,36 +50,20 @@ nv41_fb_init(struct nouveau_object *object)
 	return 0;
 }
 
-static int
-nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv41_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv41_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 12;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv40_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv41_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv41_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x41),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv41_fb_ctor,
+struct nouveau_oclass *
+nv41_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x41),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv41_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv41_ram_oclass,
+	.tile.regions = 12,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv40_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv41_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
index c5246c29f293..d8478208a681 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv44_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 static void
 nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -52,7 +48,7 @@ nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 int
 nv44_fb_init(struct nouveau_object *object)
 {
-	struct nv44_fb_priv *priv = (void *)object;
+	struct nv04_fb_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_fb_init(&priv->base);
@@ -64,35 +60,19 @@ nv44_fb_init(struct nouveau_object *object)
 	return 0;
 }
 
-static int
-nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv44_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv44_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 12;
-	priv->base.tile.init = nv44_fb_tile_init;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv44_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv44_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x44),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv44_fb_ctor,
+struct nouveau_oclass *
+nv44_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x44),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv44_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv44_ram_oclass,
+	.tile.regions = 12,
+	.tile.init = nv44_fb_tile_init,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv44_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
index e2b57909bfca..a5b77514d35b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv46_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -44,35 +40,19 @@ nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
 	tile->pitch = pitch;
 }
 
-static int
-nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv46_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv44_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 15;
-	priv->base.tile.init = nv46_fb_tile_init;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv44_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv46_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x46),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv46_fb_ctor,
+struct nouveau_oclass *
+nv46_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x46),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv44_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv44_ram_oclass,
+	.tile.regions = 15,
+	.tile.init = nv46_fb_tile_init,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv44_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
index fe6a2278621d..3bea142376bc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
@@ -24,42 +24,22 @@
  *
  */
 
-#include "priv.h"
+#include "nv04.h"
 
-struct nv47_fb_priv {
-	struct nouveau_fb base;
-};
-
-static int
-nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv47_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv41_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 15;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv40_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv41_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv47_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x47),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv47_fb_ctor,
+struct nouveau_oclass *
+nv47_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x47),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv41_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv41_ram_oclass,
+	.tile.regions = 15,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv40_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv41_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
index 5eca99b8c7e2..666cbd5d47f5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
@@ -24,42 +24,22 @@
  *
  */
 
-#include "priv.h"
+#include "nv04.h"
 
-struct nv49_fb_priv {
-	struct nouveau_fb base;
-};
-
-static int
-nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv49_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv49_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 15;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv40_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv41_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv49_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x49),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv49_fb_ctor,
+struct nouveau_oclass *
+nv49_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x49),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv41_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv49_ram_oclass,
+	.tile.regions = 15,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv40_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv41_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
index 1190b78a1e91..42e64f364ec1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
@@ -24,40 +24,21 @@
  *
  */
 
-#include "priv.h"
+#include "nv04.h"
 
-struct nv4e_fb_priv {
-	struct nouveau_fb base;
-};
-
-static int
-nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv4e_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv4e_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 12;
-	priv->base.tile.init = nv46_fb_tile_init;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv44_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv4e_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x4e),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv4e_fb_ctor,
+struct nouveau_oclass *
+nv4e_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x4e),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv44_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv4e_ram_oclass,
+	.tile.regions = 12,
+	.tile.init = nv46_fb_tile_init,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv44_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index da614ec5564b..cbc7f00c1278 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -27,14 +27,9 @@
 #include <core/engctx.h>
 #include <core/object.h>
 
-#include "priv.h"
 #include <subdev/bios.h>
 
-struct nv50_fb_priv {
-	struct nouveau_fb base;
-	struct page *r100c08_page;
-	dma_addr_t r100c08;
-};
+#include "nv50.h"
 
 int
 nv50_fb_memtype[0x80] = {
@@ -48,7 +43,7 @@ nv50_fb_memtype[0x80] = {
 	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
 };
 
-static bool
+bool
 nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
 {
 	return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
@@ -239,7 +234,7 @@ nv50_fb_intr(struct nouveau_subdev *subdev)
 	pr_cont("0x%08x\n", st1);
 }
 
-static int
+int
 nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
@@ -248,7 +243,7 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv50_fb_priv *priv;
 	int ret;
 
-	ret = nouveau_fb_create(parent, engine, oclass, &nv50_ram_oclass, &priv);
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
@@ -264,12 +259,11 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		nv_warn(priv, "failed 0x100c08 page alloc\n");
 	}
 
-	priv->base.memtype_valid = nv50_fb_memtype_valid;
 	nv_subdev(priv)->intr = nv50_fb_intr;
 	return 0;
 }
 
-static void
+void
 nv50_fb_dtor(struct nouveau_object *object)
 {
 	struct nouveau_device *device = nv_device(object);
@@ -284,10 +278,10 @@ nv50_fb_dtor(struct nouveau_object *object)
 	nouveau_fb_destroy(&priv->base);
 }
 
-static int
+int
 nv50_fb_init(struct nouveau_object *object)
 {
-	struct nouveau_device *device = nv_device(object);
+	struct nv50_fb_impl *impl = (void *)object->oclass;
 	struct nv50_fb_priv *priv = (void *)object;
 	int ret;
 
@@ -303,33 +297,20 @@ nv50_fb_init(struct nouveau_object *object)
 
 	/* This is needed to get meaningful information from 100c90
 	 * on traps. No idea what these values mean exactly. */
-	switch (device->chipset) {
-	case 0x50:
-		nv_wr32(priv, 0x100c90, 0x000707ff);
-		break;
-	case 0xa3:
-	case 0xa5:
-	case 0xa8:
-		nv_wr32(priv, 0x100c90, 0x000d0fff);
-		break;
-	case 0xaf:
-		nv_wr32(priv, 0x100c90, 0x089d1fff);
-		break;
-	default:
-		nv_wr32(priv, 0x100c90, 0x001d07ff);
-		break;
-	}
-
+	nv_wr32(priv, 0x100c90, impl->trap);
 	return 0;
 }
 
-struct nouveau_oclass
-nv50_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_fb_oclass = &(struct nv50_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x50),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_fb_ctor,
 		.dtor = nv50_fb_dtor,
 		.init = nv50_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
+	.base.memtype = nv50_fb_memtype_valid,
+	.base.ram = &nv50_ram_oclass,
+	.trap = 0x000707ff,
+}.base.base;
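The nv50_fb_init() change is the heart of this hunk: the chipset switch that picked a magic 0x100c90 value is replaced by a trap field in the class descriptor, so the nv84/nva3/nvaa/nvaf variants added below become pure data. A standalone sketch of the data-driven shape, with simplified stand-in types, not the real nouveau structs:

#include <stdint.h>
#include <stdio.h>

struct oclass { unsigned handle; };
struct fb_impl_sketch { struct oclass base; uint32_t trap; };

static void fb_init(const struct oclass *oclass)
{
	/* recover the descriptor the object was created from, then
	 * write whatever trap value it carries (stand-in for the
	 * nv_wr32(priv, 0x100c90, impl->trap) above) */
	const struct fb_impl_sketch *impl = (const void *)oclass;
	printf("wr32(0x100c90, 0x%08x)\n", (unsigned)impl->trap);
}

int main(void)
{
	/* trap values taken from the patch itself */
	static const struct fb_impl_sketch nv50 = { { 0x50 }, 0x000707ff };
	static const struct fb_impl_sketch nvaf = { { 0xaf }, 0x089d1fff };
	fb_init(&nv50.base);            /* same init code, different data */
	fb_init(&nvaf.base);
	return 0;
}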
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h
new file mode 100644
index 000000000000..c5e5a888c607
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h
@@ -0,0 +1,33 @@
1#ifndef __NVKM_FB_NV50_H__
2#define __NVKM_FB_NV50_H__
3
4#include "priv.h"
5
6struct nv50_fb_priv {
7 struct nouveau_fb base;
8 struct page *r100c08_page;
9 dma_addr_t r100c08;
10};
11
12int nv50_fb_ctor(struct nouveau_object *, struct nouveau_object *,
13 struct nouveau_oclass *, void *, u32,
14 struct nouveau_object **);
15void nv50_fb_dtor(struct nouveau_object *);
16int nv50_fb_init(struct nouveau_object *);
17
18struct nv50_fb_impl {
19 struct nouveau_fb_impl base;
20 u32 trap;
21};
22
23#define nv50_ram_create(p,e,o,d) \
24 nv50_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
25int nv50_ram_create_(struct nouveau_object *, struct nouveau_object *,
26 struct nouveau_oclass *, int, void **);
27int nv50_ram_get(struct nouveau_fb *, u64 size, u32 align, u32 ncmin,
28 u32 memtype, struct nouveau_mem **);
29void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **);
30void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
31extern int nv50_fb_memtype[0x80];
32
33#endif
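nv50.h also carries the nv50_ram_create(p,e,o,d) wrapper, which passes sizeof(**d) so the shared create path can allocate a subclass it knows nothing about. A standalone sketch of that sizing idiom, using user-space stand-ins rather than the kernel helpers:

#include <stdlib.h>

struct ram { int type; };
struct nv50_ram_sketch { struct ram base; int hwsq_state; };

static int ram_create_(size_t length, void **pobject)
{
	*pobject = calloc(1, length);   /* base code sizes the subclass */
	return *pobject ? 0 : -1;
}

/* d has type "pointer to pointer to subclass", so sizeof(**d) is the
 * full subclass size even though ram_create_() only sees void ** */
#define ram_create(d) ram_create_(sizeof(**(d)), (void **)(d))

int main(void)
{
	struct nv50_ram_sketch *ram;
	if (ram_create(&ram))           /* allocates sizeof(*ram) bytes */
		return 1;
	ram->hwsq_state = 1;
	free(ram);
	return 0;
}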
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c
new file mode 100644
index 000000000000..cf0e767d3833
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv50.h"
26
27struct nouveau_oclass *
28nv84_fb_oclass = &(struct nv50_fb_impl) {
29 .base.base.handle = NV_SUBDEV(FB, 0x84),
30 .base.base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nv50_fb_ctor,
32 .dtor = nv50_fb_dtor,
33 .init = nv50_fb_init,
34 .fini = _nouveau_fb_fini,
35 },
36 .base.memtype = nv50_fb_memtype_valid,
37 .base.ram = &nv50_ram_oclass,
38 .trap = 0x001d07ff,
39}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c
new file mode 100644
index 000000000000..dab6e1c63d48
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv50.h"
26
27struct nouveau_oclass *
28nva3_fb_oclass = &(struct nv50_fb_impl) {
29 .base.base.handle = NV_SUBDEV(FB, 0xa3),
30 .base.base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nv50_fb_ctor,
32 .dtor = nv50_fb_dtor,
33 .init = nv50_fb_init,
34 .fini = _nouveau_fb_fini,
35 },
36 .base.memtype = nv50_fb_memtype_valid,
37 .base.ram = &nva3_ram_oclass,
38 .trap = 0x000d0fff,
39}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c
new file mode 100644
index 000000000000..cba8e6818035
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv50.h"
26
27struct nouveau_oclass *
28nvaa_fb_oclass = &(struct nv50_fb_impl) {
29 .base.base.handle = NV_SUBDEV(FB, 0xaa),
30 .base.base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nv50_fb_ctor,
32 .dtor = nv50_fb_dtor,
33 .init = nv50_fb_init,
34 .fini = _nouveau_fb_fini,
35 },
36 .base.memtype = nv50_fb_memtype_valid,
37 .base.ram = &nvaa_ram_oclass,
38 .trap = 0x001d07ff,
39}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c
new file mode 100644
index 000000000000..5423faa2c09b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv50.h"
26
27struct nouveau_oclass *
28nvaf_fb_oclass = &(struct nv50_fb_impl) {
29 .base.base.handle = NV_SUBDEV(FB, 0xaf),
30 .base.base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nv50_fb_ctor,
32 .dtor = nv50_fb_dtor,
33 .init = nv50_fb_init,
34 .fini = _nouveau_fb_fini,
35 },
36 .base.memtype = nv50_fb_memtype_valid,
37 .base.ram = &nvaa_ram_oclass,
38 .trap = 0x089d1fff,
39}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index f35d76fd746d..e5fc37c4caac 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -22,24 +22,18 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
-
-struct nvc0_fb_priv {
-	struct nouveau_fb base;
-	struct page *r100c10_page;
-	dma_addr_t r100c10;
-};
+#include "nvc0.h"
 
 extern const u8 nvc0_pte_storage_type_map[256];
 
-static bool
+bool
 nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 {
 	u8 memtype = (tile_flags & 0x0000ff00) >> 8;
 	return likely((nvc0_pte_storage_type_map[memtype] != 0xff));
 }
 
-static int
+int
 nvc0_fb_init(struct nouveau_object *object)
 {
 	struct nvc0_fb_priv *priv = (void *)object;
@@ -54,7 +48,7 @@ nvc0_fb_init(struct nouveau_object *object)
 	return 0;
 }
 
-static void
+void
 nvc0_fb_dtor(struct nouveau_object *object)
 {
 	struct nouveau_device *device = nv_device(object);
@@ -69,7 +63,7 @@ nvc0_fb_dtor(struct nouveau_object *object)
 	nouveau_fb_destroy(&priv->base);
 }
 
-static int
+int
 nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
@@ -78,13 +72,11 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nvc0_fb_priv *priv;
 	int ret;
 
-	ret = nouveau_fb_create(parent, engine, oclass, &nvc0_ram_oclass, &priv);
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.memtype_valid = nvc0_fb_memtype_valid;
-
 	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (priv->r100c10_page) {
 		priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page,
@@ -97,14 +89,15 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	return 0;
 }
 
-
-struct nouveau_oclass
-nvc0_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0xc0),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nvc0_fb_oclass = &(struct nouveau_fb_impl) {
+	.base.handle = NV_SUBDEV(FB, 0xc0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvc0_fb_ctor,
 		.dtor = nvc0_fb_dtor,
 		.init = nvc0_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.memtype = nvc0_fb_memtype_valid,
+	.ram = &nvc0_ram_oclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
new file mode 100644
index 000000000000..9e1931eb746f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
@@ -0,0 +1,29 @@
1#ifndef __NVKM_RAM_NVC0_H__
2#define __NVKM_RAM_NVC0_H__
3
4#include "priv.h"
5#include "nv50.h"
6
7struct nvc0_fb_priv {
8 struct nouveau_fb base;
9 struct page *r100c10_page;
10 dma_addr_t r100c10;
11};
12
13int nvc0_fb_ctor(struct nouveau_object *, struct nouveau_object *,
14 struct nouveau_oclass *, void *, u32,
15 struct nouveau_object **);
16void nvc0_fb_dtor(struct nouveau_object *);
17int nvc0_fb_init(struct nouveau_object *);
18bool nvc0_fb_memtype_valid(struct nouveau_fb *, u32);
19
20
21#define nvc0_ram_create(p,e,o,d) \
22 nvc0_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
23int nvc0_ram_create_(struct nouveau_object *, struct nouveau_object *,
24 struct nouveau_oclass *, int, void **);
25int nvc0_ram_get(struct nouveau_fb *, u64, u32, u32, u32,
26 struct nouveau_mem **);
27void nvc0_ram_put(struct nouveau_fb *, struct nouveau_mem **);
28
29#endif
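nvc0_fb_memtype_valid() above is a pure table lookup: the byte at bits 8..15 of tile_flags indexes a 256-entry storage-type map, and 0xff marks an unusable type. A standalone model with fake table contents (the real nvc0_pte_storage_type_map lives elsewhere in the driver):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* placeholder table; entries of 0xff are invalid storage types.
 * The [a ... b] range initializer is a GNU extension, as commonly
 * used in kernel code. */
static const uint8_t storage_type_map[256] = {
	[0x00] = 0x00,
	[0x01] = 0x01,
	[0x02 ... 0xff] = 0xff,
};

static bool memtype_valid(uint32_t tile_flags)
{
	uint8_t memtype = (tile_flags & 0x0000ff00) >> 8;
	return storage_type_map[memtype] != 0xff;
}

int main(void)
{
	/* prints "1 0": type 0x01 valid, 0x42 not (in this fake table) */
	printf("%d %d\n", memtype_valid(0x0100), memtype_valid(0x4200));
	return 0;
}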
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c
new file mode 100644
index 000000000000..595db50cfef3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26
27struct nouveau_oclass *
28nve0_fb_oclass = &(struct nouveau_fb_impl) {
29 .base.handle = NV_SUBDEV(FB, 0xe0),
30 .base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nvc0_fb_ctor,
32 .dtor = nvc0_fb_dtor,
33 .init = nvc0_fb_init,
34 .fini = _nouveau_fb_fini,
35 },
36 .memtype = nvc0_fb_memtype_valid,
37 .ram = &nve0_ram_oclass,
38}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index db9d6ddde52c..493125214e88 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -12,6 +12,8 @@
 #define nouveau_ram_fini(p,s) \
 	nouveau_object_fini(&(p)->base, (s))
 
+#define nouveau_ram_create_(p,e,o,s,d) \
+	nouveau_object_create_((p), (e), (o), 0, (s), (void **)d)
 #define _nouveau_ram_dtor nouveau_object_destroy
 #define _nouveau_ram_init nouveau_object_init
 #define _nouveau_ram_fini nouveau_object_fini
@@ -26,10 +28,16 @@ extern struct nouveau_oclass nv44_ram_oclass;
 extern struct nouveau_oclass nv49_ram_oclass;
 extern struct nouveau_oclass nv4e_ram_oclass;
 extern struct nouveau_oclass nv50_ram_oclass;
+extern struct nouveau_oclass nva3_ram_oclass;
+extern struct nouveau_oclass nvaa_ram_oclass;
 extern struct nouveau_oclass nvc0_ram_oclass;
+extern struct nouveau_oclass nve0_ram_oclass;
 
-#define nouveau_fb_create(p,e,c,r,d) \
-	nouveau_fb_create_((p), (e), (c), (r), sizeof(**d), (void **)d)
+int nouveau_sddr3_calc(struct nouveau_ram *ram);
+int nouveau_gddr5_calc(struct nouveau_ram *ram);
+
+#define nouveau_fb_create(p,e,c,d) \
+	nouveau_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
 #define nouveau_fb_destroy(p) ({ \
 	struct nouveau_fb *pfb = (p); \
 	_nouveau_fb_dtor(nv_object(pfb)); \
@@ -44,44 +52,21 @@ extern struct nouveau_oclass nvc0_ram_oclass;
 })
 
 int nouveau_fb_create_(struct nouveau_object *, struct nouveau_object *,
-		       struct nouveau_oclass *, struct nouveau_oclass *,
-		       int length, void **pobject);
+		       struct nouveau_oclass *, int, void **);
 void _nouveau_fb_dtor(struct nouveau_object *);
 int _nouveau_fb_init(struct nouveau_object *);
 int _nouveau_fb_fini(struct nouveau_object *, bool);
 
-struct nouveau_bios;
-int nouveau_fb_bios_memtype(struct nouveau_bios *);
+struct nouveau_fb_impl {
+	struct nouveau_oclass base;
+	struct nouveau_oclass *ram;
+	bool (*memtype)(struct nouveau_fb *, u32);
+};
 
 bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
+bool nv50_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
 
-void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
-void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
-void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-int nv30_fb_init(struct nouveau_object *);
-void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-
-void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
-		       struct nouveau_fb_tile *);
-
-int nv41_fb_init(struct nouveau_object *);
-void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-int nv44_fb_init(struct nouveau_object *);
-void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-
-void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
-extern int nv50_fb_memtype[0x80];
+struct nouveau_bios;
+int nouveau_fb_bios_memtype(struct nouveau_bios *);
 
 #endif
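This priv.h hunk is where the object model actually changes: nouveau_fb_create() loses its ram-oclass argument because the ram class and the memtype hook now live in nouveau_fb_impl, in the class rather than in every instance. A standalone sketch of how a common create path can consume such a descriptor (simplified types, assumed shape, not the real helpers):

#include <stdio.h>

struct oclass { unsigned handle; };
struct fb_impl {
	struct oclass base;
	const struct oclass *ram;       /* formerly a ctor argument */
	int (*memtype_valid)(unsigned); /* formerly set in each ctor */
};

static int always_valid(unsigned memtype) { (void)memtype; return 1; }

static int fb_create(const struct oclass *oclass)
{
	const struct fb_impl *impl = (const void *)oclass;
	printf("attaching ram class %x\n", impl->ram->handle);
	return impl->memtype_valid(0) ? 0 : -1;
}

int main(void)
{
	static const struct oclass some_ram = { 0x50 };
	static const struct fb_impl nv50ish = {
		.base = { 0x50 },
		.ram = &some_ram,
		.memtype_valid = always_valid,
	};
	return fb_create(&nv50ish.base);
}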
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
new file mode 100644
index 000000000000..0f57fcfe0bbf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -0,0 +1,118 @@
1#ifndef __NVKM_FBRAM_FUC_H__
2#define __NVKM_FBRAM_FUC_H__
3
4#include <subdev/pwr.h>
5
6struct ramfuc {
7 struct nouveau_memx *memx;
8 struct nouveau_fb *pfb;
9 int sequence;
10};
11
12struct ramfuc_reg {
13 int sequence;
14 bool force;
15 u32 addr[2];
16 u32 data;
17};
18
19static inline struct ramfuc_reg
20ramfuc_reg2(u32 addr1, u32 addr2)
21{
22 return (struct ramfuc_reg) {
23 .sequence = 0,
24 .addr = { addr1, addr2 },
25 .data = 0xdeadbeef,
26 };
27}
28
29static inline struct ramfuc_reg
30ramfuc_reg(u32 addr)
31{
32 return ramfuc_reg2(addr, addr);
33}
34
35static inline int
36ramfuc_init(struct ramfuc *ram, struct nouveau_fb *pfb)
37{
38 struct nouveau_pwr *ppwr = nouveau_pwr(pfb);
39 int ret;
40
41 ret = nouveau_memx_init(ppwr, &ram->memx);
42 if (ret)
43 return ret;
44
45 ram->sequence++;
46 ram->pfb = pfb;
47 return 0;
48}
49
50static inline int
51ramfuc_exec(struct ramfuc *ram, bool exec)
52{
53 int ret = 0;
54 if (ram->pfb) {
55 ret = nouveau_memx_fini(&ram->memx, exec);
56 ram->pfb = NULL;
57 }
58 return ret;
59}
60
61static inline u32
62ramfuc_rd32(struct ramfuc *ram, struct ramfuc_reg *reg)
63{
64 if (reg->sequence != ram->sequence)
65 reg->data = nv_rd32(ram->pfb, reg->addr[0]);
66 return reg->data;
67}
68
69static inline void
70ramfuc_wr32(struct ramfuc *ram, struct ramfuc_reg *reg, u32 data)
71{
72 reg->sequence = ram->sequence;
73 reg->data = data;
74 if (reg->addr[0] != reg->addr[1])
75 nouveau_memx_wr32(ram->memx, reg->addr[1], reg->data);
76 nouveau_memx_wr32(ram->memx, reg->addr[0], reg->data);
77}
78
79static inline void
80ramfuc_nuke(struct ramfuc *ram, struct ramfuc_reg *reg)
81{
82 reg->force = true;
83}
84
85static inline u32
86ramfuc_mask(struct ramfuc *ram, struct ramfuc_reg *reg, u32 mask, u32 data)
87{
88 u32 temp = ramfuc_rd32(ram, reg);
89 if (temp != ((temp & ~mask) | data) || reg->force) {
90 ramfuc_wr32(ram, reg, (temp & ~mask) | data);
91 reg->force = false;
92 }
93 return temp;
94}
95
96static inline void
97ramfuc_wait(struct ramfuc *ram, u32 addr, u32 mask, u32 data, u32 nsec)
98{
99 nouveau_memx_wait(ram->memx, addr, mask, data, nsec);
100}
101
102static inline void
103ramfuc_nsec(struct ramfuc *ram, u32 nsec)
104{
105 nouveau_memx_nsec(ram->memx, nsec);
106}
107
108#define ram_init(s,p) ramfuc_init(&(s)->base, (p))
109#define ram_exec(s,e) ramfuc_exec(&(s)->base, (e))
110#define ram_have(s,r) ((s)->r_##r.addr[0] != 0x000000)
111#define ram_rd32(s,r) ramfuc_rd32(&(s)->base, &(s)->r_##r)
112#define ram_wr32(s,r,d) ramfuc_wr32(&(s)->base, &(s)->r_##r, (d))
113#define ram_nuke(s,r) ramfuc_nuke(&(s)->base, &(s)->r_##r)
114#define ram_mask(s,r,m,d) ramfuc_mask(&(s)->base, &(s)->r_##r, (m), (d))
115#define ram_wait(s,r,m,d,n) ramfuc_wait(&(s)->base, (r), (m), (d), (n))
116#define ram_nsec(s,n) ramfuc_nsec(&(s)->base, (n))
117
118#endif
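ramfuc.h implements shadowed registers for building reclock scripts: each ramfuc_reg caches the last known value, a per-script sequence number invalidates stale shadows, ramfuc_mask() suppresses writes that would not change anything, and ramfuc_nuke() forces the next write through. A standalone model of just that caching logic, with MMIO faked by an array:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t mmio[16];               /* fake register file */

struct reg { int sequence; bool force; int addr; uint32_t data; };
struct script { int sequence; };

static uint32_t reg_rd(struct script *s, struct reg *r)
{
	if (r->sequence != s->sequence) /* shadow stale: really read */
		r->data = mmio[r->addr];
	return r->data;
}

static void reg_wr(struct script *s, struct reg *r, uint32_t data)
{
	r->sequence = s->sequence;      /* shadow now current */
	r->data = data;
	mmio[r->addr] = data;
	printf("emit wr32(%d, 0x%08x)\n", r->addr, (unsigned)data);
}

static void reg_mask(struct script *s, struct reg *r, uint32_t m, uint32_t d)
{
	uint32_t t = reg_rd(s, r);
	if (t != ((t & ~m) | d) || r->force) {  /* skip no-op writes */
		reg_wr(s, r, (t & ~m) | d);
		r->force = false;
	}
}

int main(void)
{
	struct script s = { .sequence = 1 };
	struct reg r = { .addr = 3 };
	mmio[3] = 0x11;
	reg_mask(&s, &r, 0x0f, 0x02);   /* 0x11 -> 0x12, emitted */
	reg_mask(&s, &r, 0x0f, 0x02);   /* already 0x12, skipped */
	return 0;
}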
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
index ee49ac4dbdb6..7648beb11199 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
@@ -22,7 +22,154 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/pll.h>
28#include <subdev/bios/init.h>
29#include <subdev/clock.h>
30#include <subdev/clock/pll.h>
31#include <subdev/timer.h>
32
33#include <engine/fifo.h>
34
35#include "nv40.h"
36
37int
38nv40_ram_calc(struct nouveau_fb *pfb, u32 freq)
39{
40 struct nouveau_bios *bios = nouveau_bios(pfb);
41 struct nv40_ram *ram = (void *)pfb->ram;
42 struct nvbios_pll pll;
43 int N1, M1, N2, M2;
44 int log2P, ret;
45
46 ret = nvbios_pll_parse(bios, 0x04, &pll);
47 if (ret) {
48 nv_error(pfb, "mclk pll data not found\n");
49 return ret;
50 }
51
52 ret = nv04_pll_calc(nv_subdev(pfb), &pll, freq,
53 &N1, &M1, &N2, &M2, &log2P);
54 if (ret < 0)
55 return ret;
56
57 ram->ctrl = 0x80000000 | (log2P << 16);
58 ram->ctrl |= min(pll.bias_p + log2P, (int)pll.max_p) << 20;
59 if (N2 == M2) {
60 ram->ctrl |= 0x00000100;
61 ram->coef = (N1 << 8) | M1;
62 } else {
63 ram->ctrl |= 0x40000000;
64 ram->coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
65 }
66
67 return 0;
68}
69
70int
71nv40_ram_prog(struct nouveau_fb *pfb)
72{
73 struct nouveau_bios *bios = nouveau_bios(pfb);
74 struct nv40_ram *ram = (void *)pfb->ram;
75 struct bit_entry M;
76 u32 crtc_mask = 0;
77 u8 sr1[2];
78 int i;
79
80 /* determine which CRTCs are active, fetch VGA_SR1 for each */
81 for (i = 0; i < 2; i++) {
82 u32 vbl = nv_rd32(pfb, 0x600808 + (i * 0x2000));
83 u32 cnt = 0;
84 do {
85 if (vbl != nv_rd32(pfb, 0x600808 + (i * 0x2000))) {
86 nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
87 sr1[i] = nv_rd08(pfb, 0x0c03c5 + (i * 0x2000));
88 if (!(sr1[i] & 0x20))
89 crtc_mask |= (1 << i);
90 break;
91 }
92 udelay(1);
93 } while (cnt++ < 32);
94 }
95
96 /* wait for vblank start on active crtcs, disable memory access */
97 for (i = 0; i < 2; i++) {
98 if (!(crtc_mask & (1 << i)))
99 continue;
100 nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
101 nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
102 nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
103 nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
104 }
105
106 /* prepare ram for reclocking */
107 nv_wr32(pfb, 0x1002d4, 0x00000001); /* precharge */
108 nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
109 nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
110 nv_mask(pfb, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
111 nv_wr32(pfb, 0x1002dc, 0x00000001); /* enable self-refresh */
112
113 /* change the PLL of each memory partition */
114 nv_mask(pfb, 0x00c040, 0x0000c000, 0x00000000);
115 switch (nv_device(pfb)->chipset) {
116 case 0x40:
117 case 0x45:
118 case 0x41:
119 case 0x42:
120 case 0x47:
121 nv_mask(pfb, 0x004044, 0xc0771100, ram->ctrl);
122 nv_mask(pfb, 0x00402c, 0xc0771100, ram->ctrl);
123 nv_wr32(pfb, 0x004048, ram->coef);
124 nv_wr32(pfb, 0x004030, ram->coef);
125 case 0x43:
126 case 0x49:
127 case 0x4b:
128 nv_mask(pfb, 0x004038, 0xc0771100, ram->ctrl);
129 nv_wr32(pfb, 0x00403c, ram->coef);
130 default:
131 nv_mask(pfb, 0x004020, 0xc0771100, ram->ctrl);
132 nv_wr32(pfb, 0x004024, ram->coef);
133 break;
134 }
135 udelay(100);
136 nv_mask(pfb, 0x00c040, 0x0000c000, 0x0000c000);
137
138 /* re-enable normal operation of memory controller */
139 nv_wr32(pfb, 0x1002dc, 0x00000000);
140 nv_mask(pfb, 0x100210, 0x80000000, 0x80000000);
141 udelay(100);
142
143 /* execute memory reset script from vbios */
144 if (!bit_entry(bios, 'M', &M)) {
145 struct nvbios_init init = {
146 .subdev = nv_subdev(pfb),
147 .bios = bios,
148 .offset = nv_ro16(bios, M.offset + 0x00),
149 .execute = 1,
150 };
151
152 nvbios_exec(&init);
153 }
154
155 /* make sure we're in vblank (hopefully the same one as before), and
156 * then re-enable crtc memory access
157 */
158 for (i = 0; i < 2; i++) {
159 if (!(crtc_mask & (1 << i)))
160 continue;
161 nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
162 nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
163 nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i]);
164 }
165
166 return 0;
167}
168
169void
170nv40_ram_tidy(struct nouveau_fb *pfb)
171{
172}
 
 static int
 nv40_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +177,7 @@ nv40_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 		struct nouveau_object **pobject)
 {
 	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_ram *ram;
+	struct nv40_ram *ram;
 	u32 pbus1218 = nv_rd32(pfb, 0x001218);
 	int ret;
 
@@ -40,15 +187,18 @@ nv40_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	switch (pbus1218 & 0x00000300) {
-	case 0x00000000: ram->type = NV_MEM_TYPE_SDRAM; break;
-	case 0x00000100: ram->type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000200: ram->type = NV_MEM_TYPE_GDDR3; break;
-	case 0x00000300: ram->type = NV_MEM_TYPE_DDR2; break;
+	case 0x00000000: ram->base.type = NV_MEM_TYPE_SDRAM; break;
+	case 0x00000100: ram->base.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000200: ram->base.type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000300: ram->base.type = NV_MEM_TYPE_DDR2; break;
 	}
 
-	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->tags = nv_rd32(pfb, 0x100320);
+	ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	ram->base.tags = nv_rd32(pfb, 0x100320);
+	ram->base.calc = nv40_ram_calc;
+	ram->base.prog = nv40_ram_prog;
+	ram->base.tidy = nv40_ram_tidy;
 	return 0;
 }
 
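nv40_ram_calc() only computes the PLL control/coefficient pair; nv40_ram_prog() then does the risky part under vblank, with CRTC memory access gated through VGA SR1 bit 5 and the SDRAM parked in self-refresh. Note the chipset switch in nv40_ram_prog() falls through deliberately: each group programs its extra PLL register pair and then continues into the common 0x004020/0x004024 pair in the default case. The coefficient packing is compact enough to model standalone (example N/M/P values, not from any real VBIOS, and the bias_p/max_p clamp of the real function is omitted):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* example divider values; real ones come from nv04_pll_calc() */
	int N1 = 60, M1 = 2, N2 = 3, M2 = 3, log2P = 1;
	uint32_t ctrl = 0x80000000 | ((uint32_t)log2P << 16);
	uint32_t coef;

	if (N2 == M2) {                 /* second PLL stage bypassed */
		ctrl |= 0x00000100;
		coef = (N1 << 8) | M1;
	} else {                        /* both stages active */
		ctrl |= (uint32_t)(N2 << 24) | (M2 << 16) | (N1 << 8) | M1,
		ctrl |= 0x40000000;
		coef = (uint32_t)(N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
	}
	printf("ctrl 0x%08x coef 0x%08x\n", (unsigned)ctrl, (unsigned)coef);
	return 0;
}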
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
index 1dab7e12abab..d64498a4d9ee 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv40.h"
 
 static int
 nv41_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@ nv41_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 		struct nouveau_object **pobject)
 {
 	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_ram *ram;
+	struct nv40_ram *ram;
 	u32 pfb474 = nv_rd32(pfb, 0x100474);
 	int ret;
 
@@ -40,15 +40,18 @@ nv41_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	if (pfb474 & 0x00000004)
-		ram->type = NV_MEM_TYPE_GDDR3;
+		ram->base.type = NV_MEM_TYPE_GDDR3;
 	if (pfb474 & 0x00000002)
-		ram->type = NV_MEM_TYPE_DDR2;
+		ram->base.type = NV_MEM_TYPE_DDR2;
 	if (pfb474 & 0x00000001)
-		ram->type = NV_MEM_TYPE_DDR1;
+		ram->base.type = NV_MEM_TYPE_DDR1;
 
-	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->tags = nv_rd32(pfb, 0x100320);
+	ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	ram->base.tags = nv_rd32(pfb, 0x100320);
+	ram->base.calc = nv40_ram_calc;
+	ram->base.prog = nv40_ram_prog;
+	ram->base.tidy = nv40_ram_tidy;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
index 25fff842e5c1..089acac810c5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv40.h"
 
 static int
 nv44_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@ nv44_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 		struct nouveau_object **pobject)
 {
 	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_ram *ram;
+	struct nv40_ram *ram;
 	u32 pfb474 = nv_rd32(pfb, 0x100474);
 	int ret;
 
@@ -40,13 +40,16 @@ nv44_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	if (pfb474 & 0x00000004)
-		ram->type = NV_MEM_TYPE_GDDR3;
+		ram->base.type = NV_MEM_TYPE_GDDR3;
 	if (pfb474 & 0x00000002)
-		ram->type = NV_MEM_TYPE_DDR2;
+		ram->base.type = NV_MEM_TYPE_DDR2;
 	if (pfb474 & 0x00000001)
-		ram->type = NV_MEM_TYPE_DDR1;
+		ram->base.type = NV_MEM_TYPE_DDR1;
 
-	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.calc = nv40_ram_calc;
+	ram->base.prog = nv40_ram_prog;
+	ram->base.tidy = nv40_ram_tidy;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
index ab7ef0ac9e34..baa013afa57b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv40.h"
 
 static int
 nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 		struct nouveau_object **pobject)
 {
 	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_ram *ram;
+	struct nv40_ram *ram;
 	u32 pfb914 = nv_rd32(pfb, 0x100914);
 	int ret;
 
@@ -40,15 +40,18 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	switch (pfb914 & 0x00000003) {
-	case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
-	case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000000: ram->base.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000001: ram->base.type = NV_MEM_TYPE_DDR2; break;
+	case 0x00000002: ram->base.type = NV_MEM_TYPE_GDDR3; break;
 	case 0x00000003: break;
 	}
 
-	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->tags = nv_rd32(pfb, 0x100320);
+	ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	ram->base.tags = nv_rd32(pfb, 0x100320);
+	ram->base.calc = nv40_ram_calc;
+	ram->base.prog = nv40_ram_prog;
+	ram->base.tidy = nv40_ram_tidy;
 	return 0;
 }
 
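The nv41/nv44/nv49 ram constructors now allocate a struct nv40_ram and reach its members through ram->base.*; family code later recovers the subclass from the generic pfb->ram pointer with a plain cast, which is only sound because the base is the first member. A minimal standalone sketch of that embedding/downcast contract, with simplified types:

#include <assert.h>
#include <stddef.h>

struct ram { int type; };                        /* generic base */
struct nv40_ram_sketch {
	struct ram base;                         /* must stay first */
	unsigned ctrl, coef;                     /* family extras */
};

int main(void)
{
	struct nv40_ram_sketch r = { .base.type = 2, .ctrl = 0x80010000 };
	struct ram *generic = &r.base;           /* what pfb->ram stores */

	/* nv40_ram_calc()-style downcast back to the family type */
	struct nv40_ram_sketch *fam = (struct nv40_ram_sketch *)generic;

	assert(offsetof(struct nv40_ram_sketch, base) == 0);
	assert(fam->ctrl == 0x80010000);
	return 0;
}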
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index 903baff77fdd..76762a17d89c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -23,8 +23,215 @@
  */
 
 #include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/perf.h>
+#include <subdev/bios/timing.h>
+#include <subdev/clock/pll.h>
+#include <subdev/fb.h>
+
+#include <core/option.h>
 #include <core/mm.h>
-#include "priv.h"
+
+#include "ramseq.h"
+
+#include "nv50.h"
+
40struct nv50_ramseq {
41 struct hwsq base;
42 struct hwsq_reg r_0x002504;
43 struct hwsq_reg r_0x004008;
44 struct hwsq_reg r_0x00400c;
45 struct hwsq_reg r_0x00c040;
46 struct hwsq_reg r_0x100210;
47 struct hwsq_reg r_0x1002d0;
48 struct hwsq_reg r_0x1002d4;
49 struct hwsq_reg r_0x1002dc;
50 struct hwsq_reg r_0x100da0[8];
51 struct hwsq_reg r_0x100e20;
52 struct hwsq_reg r_0x100e24;
53 struct hwsq_reg r_0x611200;
54 struct hwsq_reg r_timing[9];
55 struct hwsq_reg r_mr[4];
56};
57
58struct nv50_ram {
59 struct nouveau_ram base;
60 struct nv50_ramseq hwsq;
61};
62
63#define QFX5800NVA0 1
64
65static int
66nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
67{
68 struct nouveau_bios *bios = nouveau_bios(pfb);
69 struct nv50_ram *ram = (void *)pfb->ram;
70 struct nv50_ramseq *hwsq = &ram->hwsq;
71 struct nvbios_perfE perfE;
72 struct nvbios_pll mpll;
73 struct bit_entry M;
74 struct {
75 u32 data;
76 u8 size;
77 } ramcfg, timing;
78 u8 ver, hdr, cnt, strap;
79 u32 data;
80 int N1, M1, N2, M2, P;
81 int ret, i;
82
83 /* lookup closest matching performance table entry for frequency */
84 i = 0;
85 do {
86 ramcfg.data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
87 &ramcfg.size, &perfE);
88 if (!ramcfg.data || (ver < 0x25 || ver >= 0x40) ||
89 (ramcfg.size < 2)) {
90 nv_error(pfb, "invalid/missing perftab entry\n");
91 return -EINVAL;
92 }
93 } while (perfE.memory < freq);
94
95 /* locate specific data set for the attached memory */
96 if (bit_entry(bios, 'M', &M) || M.version != 1 || M.length < 5) {
97 nv_error(pfb, "invalid/missing memory table\n");
98 return -EINVAL;
99 }
100
101 strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
102 data = nv_ro16(bios, M.offset + 3);
103 if (data)
104 strap = nv_ro08(bios, data + strap);
105
106 if (strap >= cnt) {
107 nv_error(pfb, "invalid ramcfg strap\n");
108 return -EINVAL;
109 }
110
111 ramcfg.data += hdr + (strap * ramcfg.size);
112
113 /* lookup memory timings, if bios says they're present */
114 strap = nv_ro08(bios, ramcfg.data + 0x01);
115 if (strap != 0xff) {
116 timing.data = nvbios_timing_entry(bios, strap, &ver, &hdr);
117 if (!timing.data || ver != 0x10 || hdr < 0x12) {
118 nv_error(pfb, "invalid/missing timing entry "
119 "%02x %04x %02x %02x\n",
120 strap, timing.data, ver, hdr);
121 return -EINVAL;
122 }
123 } else {
124 timing.data = 0;
125 }
126
127 ret = ram_init(hwsq, nv_subdev(pfb));
128 if (ret)
129 return ret;
130
131 ram_wait(hwsq, 0x01, 0x00); /* wait for !vblank */
132 ram_wait(hwsq, 0x01, 0x01); /* wait for vblank */
133 ram_wr32(hwsq, 0x611200, 0x00003300);
134 ram_wr32(hwsq, 0x002504, 0x00000001); /* block fifo */
135 ram_nsec(hwsq, 8000);
136 ram_setf(hwsq, 0x10, 0x00); /* disable fb */
137 ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
138
139 ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
140 ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
141 ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
142 ram_wr32(hwsq, 0x100210, 0x00000000); /* disable auto-refresh */
143 ram_wr32(hwsq, 0x1002dc, 0x00000001); /* enable self-refresh */
144
145 ret = nvbios_pll_parse(bios, 0x004008, &mpll);
146 mpll.vco2.max_freq = 0;
147 if (ret == 0) {
148 ret = nv04_pll_calc(nv_subdev(pfb), &mpll, freq,
149 &N1, &M1, &N2, &M2, &P);
150 if (ret == 0)
151 ret = -EINVAL;
152 }
153
154 if (ret < 0)
155 return ret;
156
157 ram_mask(hwsq, 0x00c040, 0xc000c000, 0x0000c000);
158 ram_mask(hwsq, 0x004008, 0x00000200, 0x00000200);
159 ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
160 ram_mask(hwsq, 0x004008, 0x81ff0000, 0x80000000 | (mpll.bias_p << 19) |
161 (P << 22) | (P << 16));
162#if QFX5800NVA0
163 for (i = 0; i < 8; i++)
164 ram_mask(hwsq, 0x100da0[i], 0x00000000, 0x00000000); /*XXX*/
165#endif
166 ram_nsec(hwsq, 96000); /*XXX*/
167 ram_mask(hwsq, 0x004008, 0x00002200, 0x00002000);
168
169 ram_wr32(hwsq, 0x1002dc, 0x00000000); /* disable self-refresh */
170 ram_wr32(hwsq, 0x100210, 0x80000000); /* enable auto-refresh */
171
172 ram_nsec(hwsq, 12000);
173
174 switch (ram->base.type) {
175 case NV_MEM_TYPE_DDR2:
176 ram_nuke(hwsq, mr[0]); /* force update */
177 ram_mask(hwsq, mr[0], 0x000, 0x000);
178 break;
179 case NV_MEM_TYPE_GDDR3:
180 ram_mask(hwsq, mr[2], 0x000, 0x000);
181 ram_nuke(hwsq, mr[0]); /* force update */
182 ram_mask(hwsq, mr[0], 0x000, 0x000);
183 break;
184 default:
185 break;
186 }
187
188 ram_mask(hwsq, timing[3], 0x00000000, 0x00000000); /*XXX*/
189 ram_mask(hwsq, timing[1], 0x00000000, 0x00000000); /*XXX*/
190 ram_mask(hwsq, timing[6], 0x00000000, 0x00000000); /*XXX*/
191 ram_mask(hwsq, timing[7], 0x00000000, 0x00000000); /*XXX*/
192 ram_mask(hwsq, timing[8], 0x00000000, 0x00000000); /*XXX*/
193 ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
194 ram_mask(hwsq, timing[2], 0x00000000, 0x00000000); /*XXX*/
195 ram_mask(hwsq, timing[4], 0x00000000, 0x00000000); /*XXX*/
196 ram_mask(hwsq, timing[5], 0x00000000, 0x00000000); /*XXX*/
197
198 ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
199
200#if QFX5800NVA0
201 ram_nuke(hwsq, 0x100e24);
202 ram_mask(hwsq, 0x100e24, 0x00000000, 0x00000000);
203 ram_nuke(hwsq, 0x100e20);
204 ram_mask(hwsq, 0x100e20, 0x00000000, 0x00000000);
205#endif
206
207 ram_mask(hwsq, mr[0], 0x100, 0x100);
208 ram_mask(hwsq, mr[0], 0x100, 0x000);
209
210 ram_setf(hwsq, 0x10, 0x01); /* enable fb */
211 ram_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
212 ram_wr32(hwsq, 0x611200, 0x00003330);
213 ram_wr32(hwsq, 0x002504, 0x00000000); /* un-block fifo */
214 return 0;
215}
216
217static int
218nv50_ram_prog(struct nouveau_fb *pfb)
219{
220 struct nouveau_device *device = nv_device(pfb);
221 struct nv50_ram *ram = (void *)pfb->ram;
222 struct nv50_ramseq *hwsq = &ram->hwsq;
223
224 ram_exec(hwsq, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
225 return 0;
226}
227
228static void
229nv50_ram_tidy(struct nouveau_fb *pfb)
230{
231 struct nv50_ram *ram = (void *)pfb->ram;
232 struct nv50_ramseq *hwsq = &ram->hwsq;
233 ram_exec(hwsq, false);
234}
 
 void
 __nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem)
@@ -57,7 +264,7 @@ nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
 	kfree(mem);
 }
 
-static int
+int
 nv50_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
 	     u32 memtype, struct nouveau_mem **pmem)
 {
@@ -160,77 +367,114 @@ nv50_fb_vram_rblock(struct nouveau_fb *pfb, struct nouveau_ram *ram)
 	return rblock_size;
 }
 
-static int
-nv50_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
-		struct nouveau_oclass *oclass, void *data, u32 datasize,
-		struct nouveau_object **pobject)
+int
+nv50_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, int length, void **pobject)
 {
-	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_device *device = nv_device(pfb);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nouveau_ram *ram;
 	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
 	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	u32 size;
+	struct nouveau_bios *bios = nouveau_bios(parent);
+	struct nouveau_fb *pfb = nouveau_fb(parent);
+	struct nouveau_ram *ram;
 	int ret;
 
-	ret = nouveau_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	ret = nouveau_ram_create_(parent, engine, oclass, length, pobject);
+	ram = *pobject;
 	if (ret)
 		return ret;
 
 	ram->size = nv_rd32(pfb, 0x10020c);
-	ram->size = (ram->size & 0xffffff00) |
-		    ((ram->size & 0x000000ff) << 32);
-
-	size = (ram->size >> 12) - rsvd_head - rsvd_tail;
-	switch (device->chipset) {
-	case 0xaa:
-	case 0xac:
-	case 0xaf: /* IGPs, no reordering, no real VRAM */
-		ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
-		if (ret)
-			return ret;
+	ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
 
-		ram->type = NV_MEM_TYPE_STOLEN;
-		ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+	switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
+	case 0: ram->type = NV_MEM_TYPE_DDR1; break;
+	case 1:
+		if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
+			ram->type = NV_MEM_TYPE_DDR3;
+		else
+			ram->type = NV_MEM_TYPE_DDR2;
 		break;
+	case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
+	case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
+	case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
 	default:
-		switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
-		case 0: ram->type = NV_MEM_TYPE_DDR1; break;
-		case 1:
-			if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
-				ram->type = NV_MEM_TYPE_DDR3;
-			else
-				ram->type = NV_MEM_TYPE_DDR2;
-			break;
-		case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
-		case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
-		case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
-		default:
-			break;
-		}
-
-		ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
-				      nv50_fb_vram_rblock(pfb, ram) >> 12);
-		if (ret)
-			return ret;
-
-		ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
-		ram->tags = nv_rd32(pfb, 0x100320);
 		break;
 	}
 
+	ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
+			      (rsvd_head + rsvd_tail),
+			      nv50_fb_vram_rblock(pfb, ram) >> 12);
+	if (ret)
+		return ret;
+
+	ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+	ram->tags = nv_rd32(pfb, 0x100320);
 	ram->get = nv50_ram_get;
 	ram->put = nv50_ram_put;
 	return 0;
 }
 
+static int
+nv50_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 datasize,
+	      struct nouveau_object **pobject)
+{
+	struct nv50_ram *ram;
+	int ret, i;
+
+	ret = nv50_ram_create(parent, engine, oclass, &ram);
+	*pobject = nv_object(ram);
+	if (ret)
+		return ret;
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_DDR2:
+	case NV_MEM_TYPE_GDDR3:
+		ram->base.calc = nv50_ram_calc;
+		ram->base.prog = nv50_ram_prog;
+		ram->base.tidy = nv50_ram_tidy;
+		break;
+	default:
+		nv_warn(ram, "reclocking of this ram type unsupported\n");
+		return 0;
+	}
+
+	ram->hwsq.r_0x002504 = hwsq_reg(0x002504);
+	ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040);
+	ram->hwsq.r_0x004008 = hwsq_reg(0x004008);
+	ram->hwsq.r_0x00400c = hwsq_reg(0x00400c);
+	ram->hwsq.r_0x100210 = hwsq_reg(0x100210);
+	ram->hwsq.r_0x1002d0 = hwsq_reg(0x1002d0);
+	ram->hwsq.r_0x1002d4 = hwsq_reg(0x1002d4);
+	ram->hwsq.r_0x1002dc = hwsq_reg(0x1002dc);
+	for (i = 0; i < 8; i++)
+		ram->hwsq.r_0x100da0[i] = hwsq_reg(0x100da0 + (i * 0x04));
+	ram->hwsq.r_0x100e20 = hwsq_reg(0x100e20);
+	ram->hwsq.r_0x100e24 = hwsq_reg(0x100e24);
+	ram->hwsq.r_0x611200 = hwsq_reg(0x611200);
+
+	for (i = 0; i < 9; i++)
+		ram->hwsq.r_timing[i] = hwsq_reg(0x100220 + (i * 0x04));
+
+	if (ram->base.ranks > 1) {
+		ram->hwsq.r_mr[0] = hwsq_reg2(0x1002c0, 0x1002c8);
+		ram->hwsq.r_mr[1] = hwsq_reg2(0x1002c4, 0x1002cc);
+		ram->hwsq.r_mr[2] = hwsq_reg2(0x1002e0, 0x1002e8);
+		ram->hwsq.r_mr[3] = hwsq_reg2(0x1002e4, 0x1002ec);
+	} else {
+		ram->hwsq.r_mr[0] = hwsq_reg(0x1002c0);
+		ram->hwsq.r_mr[1] = hwsq_reg(0x1002c4);
+		ram->hwsq.r_mr[2] = hwsq_reg(0x1002e0);
+		ram->hwsq.r_mr[3] = hwsq_reg(0x1002e4);
+	}
+
+	return 0;
+}
+
 struct nouveau_oclass
 nv50_ram_oclass = {
-	.handle = 0,
 	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv50_ram_create,
+		.ctor = nv50_ram_ctor,
 		.dtor = _nouveau_ram_dtor,
 		.init = _nouveau_ram_init,
 		.fini = _nouveau_ram_fini,
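nv50_ram_ctor() finishes by binding every logical register the reclock script touches, and nv50_ram_prog() only executes the assembled script when the NvMemExec config option is set, so a sequence can be built and inspected without touching hardware. On boards with two ranks the mode registers get a second MMIO address; ramfuc_wr32() earlier in this patch shows the dual-address fan-out explicitly, and the hwsq helpers presumably mirror it. A standalone model of that fan-out:

#include <stdint.h>
#include <stdio.h>

struct reg2 { uint32_t addr[2]; };      /* one logical, two physical */

static void reg_wr(const struct reg2 *r, uint32_t data)
{
	if (r->addr[0] != r->addr[1])   /* second rank present */
		printf("wr32(0x%06x, 0x%08x)\n",
		       (unsigned)r->addr[1], (unsigned)data);
	printf("wr32(0x%06x, 0x%08x)\n",
	       (unsigned)r->addr[0], (unsigned)data);
}

int main(void)
{
	struct reg2 mr0_two_ranks = { { 0x1002c0, 0x1002c8 } };
	struct reg2 mr0_one_rank  = { { 0x1002c0, 0x1002c0 } };
	reg_wr(&mr0_two_ranks, 0x100);  /* two MMIO writes */
	reg_wr(&mr0_one_rank, 0x100);   /* one MMIO write */
	return 0;
}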
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
new file mode 100644
index 000000000000..f6292cd9207c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -0,0 +1,447 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/pll.h>
28#include <subdev/bios/rammap.h>
29#include <subdev/bios/timing.h>
30
31#include <subdev/clock/nva3.h>
32#include <subdev/clock/pll.h>
33
34#include <core/option.h>
35
36#include "ramfuc.h"
37
38#include "nv50.h"
39
40struct nva3_ramfuc {
41 struct ramfuc base;
42 struct ramfuc_reg r_0x004000;
43 struct ramfuc_reg r_0x004004;
44 struct ramfuc_reg r_0x004018;
45 struct ramfuc_reg r_0x004128;
46 struct ramfuc_reg r_0x004168;
47 struct ramfuc_reg r_0x100200;
48 struct ramfuc_reg r_0x100210;
49 struct ramfuc_reg r_0x100220[9];
50 struct ramfuc_reg r_0x1002d0;
51 struct ramfuc_reg r_0x1002d4;
52 struct ramfuc_reg r_0x1002dc;
53 struct ramfuc_reg r_0x10053c;
54 struct ramfuc_reg r_0x1005a0;
55 struct ramfuc_reg r_0x1005a4;
56 struct ramfuc_reg r_0x100714;
57 struct ramfuc_reg r_0x100718;
58 struct ramfuc_reg r_0x10071c;
59 struct ramfuc_reg r_0x100760;
60 struct ramfuc_reg r_0x1007a0;
61 struct ramfuc_reg r_0x1007e0;
62 struct ramfuc_reg r_0x10f804;
63 struct ramfuc_reg r_0x1110e0;
64 struct ramfuc_reg r_0x111100;
65 struct ramfuc_reg r_0x111104;
66 struct ramfuc_reg r_0x611200;
67 struct ramfuc_reg r_mr[4];
68};
69
70struct nva3_ram {
71 struct nouveau_ram base;
72 struct nva3_ramfuc fuc;
73};
74
75static int
76nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
77{
78 struct nouveau_bios *bios = nouveau_bios(pfb);
79 struct nva3_ram *ram = (void *)pfb->ram;
80 struct nva3_ramfuc *fuc = &ram->fuc;
81 struct nva3_clock_info mclk;
82 struct bit_entry M;
83 u8 ver, cnt, strap;
84 u32 data;
85 struct {
86 u32 data;
87 u8 size;
88 } rammap, ramcfg, timing;
89 u32 r004018, r100760, ctrl;
90 u32 unk714, unk718, unk71c;
91 int ret;
92
93 /* lookup memory config data relevant to the target frequency */
94 rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
95 &cnt, &ramcfg.size);
96 if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
97 nv_error(pfb, "invalid/missing rammap entry\n");
98 return -EINVAL;
99 }
100
101 /* locate specific data set for the attached memory */
102 if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
103 nv_error(pfb, "invalid/missing memory table\n");
104 return -EINVAL;
105 }
106
107 strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
108 data = nv_ro16(bios, M.offset + 1);
109 if (data)
110 strap = nv_ro08(bios, data + strap);
111
112 if (strap >= cnt) {
113 nv_error(pfb, "invalid ramcfg strap\n");
114 return -EINVAL;
115 }
116
117 ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
118 if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
119 nv_error(pfb, "invalid/missing ramcfg entry\n");
120 return -EINVAL;
121 }
122
123 /* lookup memory timings, if bios says they're present */
124 strap = nv_ro08(bios, ramcfg.data + 0x01);
125 if (strap != 0xff) {
126 timing.data = nvbios_timing_entry(bios, strap, &ver,
127 &timing.size);
128 if (!timing.data || ver != 0x10 || timing.size < 0x19) {
129 nv_error(pfb, "invalid/missing timing entry\n");
130 return -EINVAL;
131 }
132 } else {
133 timing.data = 0;
134 }
135
136 ret = nva3_clock_info(nouveau_clock(pfb), 0x12, 0x4000, freq, &mclk);
137 if (ret < 0) {
138 nv_error(pfb, "failed mclk calculation\n");
139 return ret;
140 }
141
142 ret = ram_init(fuc, pfb);
143 if (ret)
144 return ret;
145
146	/* XXX: where does the 750MHz threshold come from? */
147 if (freq <= 750000) {
148 r004018 = 0x10000000;
149 r100760 = 0x22222222;
150 } else {
151 r004018 = 0x00000000;
152 r100760 = 0x00000000;
153 }
154
155 ctrl = ram_rd32(fuc, 0x004000);
156 if (ctrl & 0x00000008) {
157 if (mclk.pll) {
158 ram_mask(fuc, 0x004128, 0x00000101, 0x00000101);
159 ram_wr32(fuc, 0x004004, mclk.pll);
160 ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
161 ram_wr32(fuc, 0x004000, (ctrl &= 0xffffffef));
162 ram_wait(fuc, 0x004000, 0x00020000, 0x00020000, 64000);
163 ram_wr32(fuc, 0x004000, (ctrl |= 0x00000010));
164 ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
165 ram_wr32(fuc, 0x004000, (ctrl |= 0x00000004));
166 }
167 } else {
168 u32 ssel = 0x00000101;
169 if (mclk.clk)
170 ssel |= mclk.clk;
171 else
172 ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
173	ram_mask(fuc, 0x004168, 0x003f3141, ssel);
174 }
175
176 if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)) {
177 ram_mask(fuc, 0x111104, 0x00000600, 0x00000000);
178 } else {
179 ram_mask(fuc, 0x111100, 0x40000000, 0x40000000);
180 ram_mask(fuc, 0x111104, 0x00000180, 0x00000000);
181 }
182
183 if (!(nv_ro08(bios, rammap.data + 0x04) & 0x02))
184 ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
185 ram_wr32(fuc, 0x611200, 0x00003300);
186 if (!(nv_ro08(bios, ramcfg.data + 0x02) & 0x10))
187 ram_wr32(fuc, 0x111100, 0x4c020000); /*XXX*/
188
189 ram_wr32(fuc, 0x1002d4, 0x00000001);
190 ram_wr32(fuc, 0x1002d0, 0x00000001);
191 ram_wr32(fuc, 0x1002d0, 0x00000001);
192 ram_wr32(fuc, 0x100210, 0x00000000);
193 ram_wr32(fuc, 0x1002dc, 0x00000001);
194 ram_nsec(fuc, 2000);
195
196 ctrl = ram_rd32(fuc, 0x004000);
197 if (!(ctrl & 0x00000008) && mclk.pll) {
198 ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
199 ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
200 ram_wr32(fuc, 0x004018, 0x00001000);
201 ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000001));
202 ram_wr32(fuc, 0x004004, mclk.pll);
203 ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
204 udelay(64);
205 ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
206 udelay(20);
207 } else
208 if (!mclk.pll) {
209 ram_mask(fuc, 0x004168, 0x003f3040, mclk.clk);
210 ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
211 ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
212 ram_wr32(fuc, 0x004018, 0x0000d000 | r004018);
213 }
214
215 if ( (nv_ro08(bios, rammap.data + 0x04) & 0x08)) {
216 u32 unk5a0 = (nv_ro16(bios, ramcfg.data + 0x05) << 8) |
217 nv_ro08(bios, ramcfg.data + 0x05);
218 u32 unk5a4 = (nv_ro16(bios, ramcfg.data + 0x07));
219 u32 unk804 = (nv_ro08(bios, ramcfg.data + 0x09) & 0xf0) << 16 |
220 (nv_ro08(bios, ramcfg.data + 0x03) & 0x0f) << 16 |
221 (nv_ro08(bios, ramcfg.data + 0x09) & 0x0f) |
222 0x80000000;
223 ram_wr32(fuc, 0x1005a0, unk5a0);
224 ram_wr32(fuc, 0x1005a4, unk5a4);
225 ram_wr32(fuc, 0x10f804, unk804);
226 ram_mask(fuc, 0x10053c, 0x00001000, 0x00000000);
227 } else {
228 ram_mask(fuc, 0x10053c, 0x00001000, 0x00001000);
229 ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
230 ram_mask(fuc, 0x100760, 0x22222222, r100760);
231 ram_mask(fuc, 0x1007a0, 0x22222222, r100760);
232 ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
233 }
234
235 if (mclk.pll) {
236 ram_mask(fuc, 0x1110e0, 0x00088000, 0x00011000);
237 ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000008));
238 }
239
240 /*XXX: LEAVE */
241 ram_wr32(fuc, 0x1002dc, 0x00000000);
242 ram_wr32(fuc, 0x1002d4, 0x00000001);
243 ram_wr32(fuc, 0x100210, 0x80000000);
244 ram_nsec(fuc, 1000);
245 ram_nsec(fuc, 1000);
246
247 ram_mask(fuc, mr[2], 0x00000000, 0x00000000);
248 ram_nsec(fuc, 1000);
249 ram_nuke(fuc, mr[0]);
250 ram_mask(fuc, mr[0], 0x00000000, 0x00000000);
251 ram_nsec(fuc, 1000);
252
253 ram_mask(fuc, 0x100220[3], 0x00000000, 0x00000000);
254 ram_mask(fuc, 0x100220[1], 0x00000000, 0x00000000);
255 ram_mask(fuc, 0x100220[6], 0x00000000, 0x00000000);
256 ram_mask(fuc, 0x100220[7], 0x00000000, 0x00000000);
257 ram_mask(fuc, 0x100220[2], 0x00000000, 0x00000000);
258 ram_mask(fuc, 0x100220[4], 0x00000000, 0x00000000);
259 ram_mask(fuc, 0x100220[5], 0x00000000, 0x00000000);
260 ram_mask(fuc, 0x100220[0], 0x00000000, 0x00000000);
261 ram_mask(fuc, 0x100220[8], 0x00000000, 0x00000000);
262
263 data = (nv_ro08(bios, ramcfg.data + 0x02) & 0x08) ? 0x00000000 : 0x00001000;
264 ram_mask(fuc, 0x100200, 0x00001000, data);
265
266 unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000010;
267 unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100;
268 unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
269 if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x20))
270 unk714 |= 0xf0000000;
271 if (!(nv_ro08(bios, ramcfg.data + 0x02) & 0x04))
272 unk714 |= 0x00000010;
273 ram_wr32(fuc, 0x100714, unk714);
274
275 if (nv_ro08(bios, ramcfg.data + 0x02) & 0x01)
276 unk71c |= 0x00000100;
277 ram_wr32(fuc, 0x10071c, unk71c);
278
279 if (nv_ro08(bios, ramcfg.data + 0x02) & 0x02)
280 unk718 |= 0x00000100;
281 ram_wr32(fuc, 0x100718, unk718);
282
283 if (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)
284 ram_wr32(fuc, 0x111100, 0x48000000); /*XXX*/
285
286 ram_mask(fuc, mr[0], 0x100, 0x100);
287 ram_nsec(fuc, 1000);
288 ram_mask(fuc, mr[0], 0x100, 0x000);
289 ram_nsec(fuc, 1000);
290
291 ram_nsec(fuc, 2000);
292 ram_nsec(fuc, 12000);
293
294 ram_wr32(fuc, 0x611200, 0x00003330);
295 if ( (nv_ro08(bios, rammap.data + 0x04) & 0x02))
296 ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
297 if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)) {
298 ram_mask(fuc, 0x111104, 0x00000180, 0x00000180);
299 ram_mask(fuc, 0x111100, 0x40000000, 0x00000000);
300 } else {
301 ram_mask(fuc, 0x111104, 0x00000600, 0x00000600);
302 }
303
304 if (mclk.pll) {
305 ram_mask(fuc, 0x004168, 0x00000001, 0x00000000);
306 ram_mask(fuc, 0x004168, 0x00000100, 0x00000000);
307 } else {
308 ram_mask(fuc, 0x004000, 0x00000001, 0x00000000);
309 ram_mask(fuc, 0x004128, 0x00000001, 0x00000000);
310 ram_mask(fuc, 0x004128, 0x00000100, 0x00000000);
311 }
312
313 return 0;
314}
315
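The table walk at the top of nva3_ram_calc() works as follows: the rammap entry matched against the target frequency is followed in the VBIOS image by cnt ramcfg sub-entries, one per board strap, and the strap read from 0x101000 (optionally translated through the 'M' table pointer) selects which one applies. A minimal standalone sketch of that indexing arithmetic, using hypothetical offsets rather than any real VBIOS layout:

#include <stdint.h>
#include <stdio.h>

/* ramcfg entries follow the rammap header back-to-back */
static uint32_t ramcfg_offset(uint32_t rammap, uint8_t rammap_size,
			      uint8_t ramcfg_size, uint8_t strap)
{
	return rammap + rammap_size + (uint32_t)strap * ramcfg_size;
}

int main(void)
{
	/* hypothetical: rammap entry at 0x4000 with an 0x0e-byte header,
	 * 0x0e-byte ramcfg entries, strap 3 selected */
	printf("ramcfg at 0x%04x\n",
	       (unsigned)ramcfg_offset(0x4000, 0x0e, 0x0e, 3));
	return 0;
}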
316static int
317nva3_ram_prog(struct nouveau_fb *pfb)
318{
319 struct nouveau_device *device = nv_device(pfb);
320 struct nva3_ram *ram = (void *)pfb->ram;
321 struct nva3_ramfuc *fuc = &ram->fuc;
322 ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
323 return 0;
324}
325
326static void
327nva3_ram_tidy(struct nouveau_fb *pfb)
328{
329 struct nva3_ram *ram = (void *)pfb->ram;
330 struct nva3_ramfuc *fuc = &ram->fuc;
331 ram_exec(fuc, false);
332}
333
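nva3_ram_prog() and nva3_ram_tidy() show how the sequence above reaches (or deliberately fails to reach) the hardware: ram_exec() takes a boolean, and nouveau_boolopt() only returns true when the user has set the "NvMemExec" config option, so by default the generated reclock script is built and then discarded. A toy model of that dry-run gate, with stand-in helpers rather than the kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* crude stand-in for nouveau_boolopt(): option named in cfg => true */
static bool boolopt(const char *cfg, const char *name, bool fallback)
{
	return (cfg && strstr(cfg, name)) ? true : fallback;
}

struct script { int nr_ops; };

static void script_exec(struct script *s, bool exec)
{
	if (exec)
		printf("replaying %d register ops on the hardware\n", s->nr_ops);
	else
		printf("dry run: discarding %d register ops\n", s->nr_ops);
	s->nr_ops = 0;
}

int main(void)
{
	struct script s = { .nr_ops = 42 };

	/* pass a cfg string without the option to see the dry-run path */
	script_exec(&s, boolopt("NvMemExec", "NvMemExec", false));
	return 0;
}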
334static int
335nva3_ram_init(struct nouveau_object *object)
336{
337 struct nouveau_fb *pfb = (void *)object->parent;
338 struct nva3_ram *ram = (void *)object;
339 int ret, i;
340
341 ret = nouveau_ram_init(&ram->base);
342 if (ret)
343 return ret;
344
345 /* prepare for ddr link training, and load training patterns */
346 switch (ram->base.type) {
347 case NV_MEM_TYPE_DDR3: {
348 static const u32 pattern[16] = {
349 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
350 0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
351 0x33333333, 0x55555555, 0x77777777, 0x66666666,
352 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
353 };
354
355 nv_wr32(pfb, 0x100538, 0x10001ff6); /*XXX*/
356 nv_wr32(pfb, 0x1005a8, 0x0000ffff);
357 nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
358 for (i = 0; i < 0x30; i++) {
359 nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
360 nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
361 nv_wr32(pfb, 0x10f900, pattern[i % 16]);
362 nv_wr32(pfb, 0x10f920, pattern[i % 16]);
363 }
364 }
365 break;
366 default:
367 break;
368 }
369
370 return 0;
371}
372
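The DDR3 branch of nva3_ram_init() loads 0x30 training slots, tagging each slot with its own index in both bytes of the address registers and cycling through the 16-entry pattern table. The loop below reproduces just that arithmetic as a runnable illustration (the register writes themselves are replaced by printf):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const uint32_t pattern[16] = {
		0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
		0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
		0x33333333, 0x55555555, 0x77777777, 0x66666666,
		0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
	};
	int i;

	for (i = 0; i < 0x30; i++)
		printf("slot 0x%02x: tag 0x%04x data 0x%08" PRIx32 "\n",
		       i, (i << 8) | i, pattern[i % 16]);
	return 0;
}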
373static int
374nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
375 struct nouveau_oclass *oclass, void *data, u32 datasize,
376 struct nouveau_object **pobject)
377{
378 struct nva3_ram *ram;
379 int ret, i;
380
381 ret = nv50_ram_create(parent, engine, oclass, &ram);
382 *pobject = nv_object(ram);
383 if (ret)
384 return ret;
385
386 switch (ram->base.type) {
387 case NV_MEM_TYPE_DDR3:
388 ram->base.calc = nva3_ram_calc;
389 ram->base.prog = nva3_ram_prog;
390 ram->base.tidy = nva3_ram_tidy;
391 break;
392 default:
393 nv_warn(ram, "reclocking of this ram type unsupported\n");
394 return 0;
395 }
396
397 ram->fuc.r_0x004000 = ramfuc_reg(0x004000);
398 ram->fuc.r_0x004004 = ramfuc_reg(0x004004);
399 ram->fuc.r_0x004018 = ramfuc_reg(0x004018);
400 ram->fuc.r_0x004128 = ramfuc_reg(0x004128);
401 ram->fuc.r_0x004168 = ramfuc_reg(0x004168);
402 ram->fuc.r_0x100200 = ramfuc_reg(0x100200);
403 ram->fuc.r_0x100210 = ramfuc_reg(0x100210);
404 for (i = 0; i < 9; i++)
405 ram->fuc.r_0x100220[i] = ramfuc_reg(0x100220 + (i * 4));
406 ram->fuc.r_0x1002d0 = ramfuc_reg(0x1002d0);
407 ram->fuc.r_0x1002d4 = ramfuc_reg(0x1002d4);
408 ram->fuc.r_0x1002dc = ramfuc_reg(0x1002dc);
409 ram->fuc.r_0x10053c = ramfuc_reg(0x10053c);
410 ram->fuc.r_0x1005a0 = ramfuc_reg(0x1005a0);
411 ram->fuc.r_0x1005a4 = ramfuc_reg(0x1005a4);
412 ram->fuc.r_0x100714 = ramfuc_reg(0x100714);
413 ram->fuc.r_0x100718 = ramfuc_reg(0x100718);
414 ram->fuc.r_0x10071c = ramfuc_reg(0x10071c);
415 ram->fuc.r_0x100760 = ramfuc_reg(0x100760);
416 ram->fuc.r_0x1007a0 = ramfuc_reg(0x1007a0);
417 ram->fuc.r_0x1007e0 = ramfuc_reg(0x1007e0);
418 ram->fuc.r_0x10f804 = ramfuc_reg(0x10f804);
419 ram->fuc.r_0x1110e0 = ramfuc_reg(0x1110e0);
420 ram->fuc.r_0x111100 = ramfuc_reg(0x111100);
421 ram->fuc.r_0x111104 = ramfuc_reg(0x111104);
422 ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
423
424 if (ram->base.ranks > 1) {
425 ram->fuc.r_mr[0] = ramfuc_reg2(0x1002c0, 0x1002c8);
426 ram->fuc.r_mr[1] = ramfuc_reg2(0x1002c4, 0x1002cc);
427 ram->fuc.r_mr[2] = ramfuc_reg2(0x1002e0, 0x1002e8);
428 ram->fuc.r_mr[3] = ramfuc_reg2(0x1002e4, 0x1002ec);
429 } else {
430 ram->fuc.r_mr[0] = ramfuc_reg(0x1002c0);
431 ram->fuc.r_mr[1] = ramfuc_reg(0x1002c4);
432 ram->fuc.r_mr[2] = ramfuc_reg(0x1002e0);
433 ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
434 }
435
436 return 0;
437}
438
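The tail of nva3_ram_ctor() is the interesting part for multi-rank boards: each mode register exists once per rank, so on more-than-one-rank configurations ramfuc_reg2() binds one logical MR slot to two MMIO addresses and every later ram_mask(fuc, mr[n], ...) hits both. A self-contained sketch of that one-logical-to-many-physical binding (the struct layout and helpers here are illustrative, not the ramfuc internals):

#include <stdint.h>
#include <stdio.h>

struct reg {
	uint32_t addr[2];	/* second slot 0 when only one copy exists */
};

static struct reg reg1(uint32_t a)
{
	return (struct reg){ { a, 0 } };
}

static struct reg reg2(uint32_t a, uint32_t b)
{
	return (struct reg){ { a, b } };
}

static void reg_wr(const struct reg *r, uint32_t v)
{
	int i;

	for (i = 0; i < 2; i++)
		if (r->addr[i])
			printf("wr 0x%06x <- 0x%08x\n",
			       (unsigned)r->addr[i], (unsigned)v);
}

int main(void)
{
	struct reg mr0_single = reg1(0x1002c0);
	struct reg mr0_dual   = reg2(0x1002c0, 0x1002c8);	/* two ranks */

	reg_wr(&mr0_single, 0x100);
	reg_wr(&mr0_dual, 0x100);
	return 0;
}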
439struct nouveau_oclass
440nva3_ram_oclass = {
441 .ofuncs = &(struct nouveau_ofuncs) {
442 .ctor = nva3_ram_ctor,
443 .dtor = _nouveau_ram_dtor,
444 .init = nva3_ram_init,
445 .fini = _nouveau_ram_fini,
446 },
447};
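A note on the ram_wr32()/ram_mask()/ram_wait() calls used throughout this file: going by ramfuc.h, they do not touch the hardware directly but record operations against the pre-declared ramfuc_reg slots, to be replayed later by ram_exec(). The model below is a rough sketch of that record-then-replay idea under those assumptions; it is not the real ramfuc implementation:

#include <stdint.h>
#include <stdio.h>

enum op_kind { OP_WR32, OP_NSEC };

struct op {
	enum op_kind kind;
	uint32_t addr;
	uint32_t data;
};

static struct op script[256];
static int nr_ops;

static void wr32(uint32_t addr, uint32_t data)
{
	script[nr_ops++] = (struct op){ OP_WR32, addr, data };
}

static void nsec(uint32_t ns)
{
	script[nr_ops++] = (struct op){ OP_NSEC, 0, ns };
}

static void exec(void)
{
	int i;

	for (i = 0; i < nr_ops; i++) {
		if (script[i].kind == OP_WR32)
			printf("wr 0x%06x <- 0x%08x\n",
			       (unsigned)script[i].addr,
			       (unsigned)script[i].data);
		else
			printf("delay %u ns\n", (unsigned)script[i].data);
	}
	nr_ops = 0;
}

int main(void)
{
	wr32(0x1002d4, 0x00000001);	/* mirrors the sequence above */
	nsec(2000);
	exec();				/* nothing happens until here */
	return 0;
}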
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
new file mode 100644
index 000000000000..00f2ca7e44a5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv50.h"
26
27static int
28nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
29 struct nouveau_oclass *oclass, void *data, u32 datasize,
30 struct nouveau_object **pobject)
31{
32 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
33 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
34 struct nouveau_fb *pfb = nouveau_fb(parent);
35 struct nouveau_ram *ram;
36 int ret;
37
38 ret = nouveau_ram_create(parent, engine, oclass, &ram);
39 *pobject = nv_object(ram);
40 if (ret)
41 return ret;
42
43 ram->size = nv_rd32(pfb, 0x10020c);
44 ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
45
46 ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
47 (rsvd_head + rsvd_tail), 1);
48 if (ret)
49 return ret;
50
51 ram->type = NV_MEM_TYPE_STOLEN;
52 ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
53 ram->get = nv50_ram_get;
54 ram->put = nv50_ram_put;
55 return 0;
56}
57
58struct nouveau_oclass
59nvaa_ram_oclass = {
60 .ofuncs = &(struct nouveau_ofuncs) {
61 .ctor = nvaa_ram_ctor,
62 .dtor = _nouveau_ram_dtor,
63 .init = _nouveau_ram_init,
64 .fini = _nouveau_ram_fini,
65 },
66};
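The size decode in nvaa_ram_ctor() is easy to misread: register 0x10020c keeps bits 31:8 of the memory size in place but stores bits 39:32 in its low byte, hence the swap. A worked example of the same decode:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t decode_size(uint32_t r10020c)
{
	uint64_t size = r10020c;

	return (size & 0xffffff00) | ((size & 0x000000ff) << 32);
}

int main(void)
{
	/* hypothetical register value: low byte 0x01 contributes 4GiB */
	uint32_t reg = 0x10000001;

	printf("0x%08x -> %" PRIu64 " MiB\n",
	       (unsigned)reg, decode_size(reg) >> 20);
	return 0;
}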
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index cf97c4de4a6b..f464547c6bab 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -23,9 +23,414 @@
23 */ 23 */
24 24
25#include <subdev/bios.h> 25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/pll.h>
28#include <subdev/bios/rammap.h>
29#include <subdev/bios/timing.h>
26#include <subdev/ltcg.h> 30#include <subdev/ltcg.h>
27 31
28#include "priv.h" 32#include <subdev/clock.h>
33#include <subdev/clock/pll.h>
34
35#include <core/option.h>
36
37#include "ramfuc.h"
38
39#include "nvc0.h"
40
41struct nvc0_ramfuc {
42 struct ramfuc base;
43
44 struct ramfuc_reg r_0x10fe20;
45 struct ramfuc_reg r_0x10fe24;
46 struct ramfuc_reg r_0x137320;
47 struct ramfuc_reg r_0x137330;
48
49 struct ramfuc_reg r_0x132000;
50 struct ramfuc_reg r_0x132004;
51 struct ramfuc_reg r_0x132100;
52
53 struct ramfuc_reg r_0x137390;
54
55 struct ramfuc_reg r_0x10f290;
56 struct ramfuc_reg r_0x10f294;
57 struct ramfuc_reg r_0x10f298;
58 struct ramfuc_reg r_0x10f29c;
59 struct ramfuc_reg r_0x10f2a0;
60
61 struct ramfuc_reg r_0x10f300;
62 struct ramfuc_reg r_0x10f338;
63 struct ramfuc_reg r_0x10f340;
64 struct ramfuc_reg r_0x10f344;
65 struct ramfuc_reg r_0x10f348;
66
67 struct ramfuc_reg r_0x10f910;
68 struct ramfuc_reg r_0x10f914;
69
70 struct ramfuc_reg r_0x100b0c;
71 struct ramfuc_reg r_0x10f050;
72 struct ramfuc_reg r_0x10f090;
73 struct ramfuc_reg r_0x10f200;
74 struct ramfuc_reg r_0x10f210;
75 struct ramfuc_reg r_0x10f310;
76 struct ramfuc_reg r_0x10f314;
77 struct ramfuc_reg r_0x10f610;
78 struct ramfuc_reg r_0x10f614;
79 struct ramfuc_reg r_0x10f800;
80 struct ramfuc_reg r_0x10f808;
81 struct ramfuc_reg r_0x10f824;
82 struct ramfuc_reg r_0x10f830;
83 struct ramfuc_reg r_0x10f988;
84 struct ramfuc_reg r_0x10f98c;
85 struct ramfuc_reg r_0x10f990;
86 struct ramfuc_reg r_0x10f998;
87 struct ramfuc_reg r_0x10f9b0;
88 struct ramfuc_reg r_0x10f9b4;
89 struct ramfuc_reg r_0x10fb04;
90 struct ramfuc_reg r_0x10fb08;
91 struct ramfuc_reg r_0x137300;
92 struct ramfuc_reg r_0x137310;
93 struct ramfuc_reg r_0x137360;
94 struct ramfuc_reg r_0x1373ec;
95 struct ramfuc_reg r_0x1373f0;
96 struct ramfuc_reg r_0x1373f8;
97
98 struct ramfuc_reg r_0x61c140;
99 struct ramfuc_reg r_0x611200;
100
101 struct ramfuc_reg r_0x13d8f4;
102};
103
104struct nvc0_ram {
105 struct nouveau_ram base;
106 struct nvc0_ramfuc fuc;
107 struct nvbios_pll refpll;
108 struct nvbios_pll mempll;
109};
110
111static void
112nvc0_ram_train(struct nvc0_ramfuc *fuc, u32 magic)
113{
114 struct nvc0_ram *ram = container_of(fuc, typeof(*ram), fuc);
115 struct nouveau_fb *pfb = nouveau_fb(ram);
116 u32 part = nv_rd32(pfb, 0x022438), i;
117 u32 mask = nv_rd32(pfb, 0x022554);
118 u32 addr = 0x110974;
119
120 ram_wr32(fuc, 0x10f910, magic);
121 ram_wr32(fuc, 0x10f914, magic);
122
123 for (i = 0; (magic & 0x80000000) && i < part; addr += 0x1000, i++) {
124 if (mask & (1 << i))
125 continue;
126 ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
127 }
128}
129
130static int
131nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
132{
133 struct nouveau_clock *clk = nouveau_clock(pfb);
134 struct nouveau_bios *bios = nouveau_bios(pfb);
135 struct nvc0_ram *ram = (void *)pfb->ram;
136 struct nvc0_ramfuc *fuc = &ram->fuc;
137 struct bit_entry M;
138 u8 ver, cnt, strap;
139 u32 data;
140 struct {
141 u32 data;
142 u8 size;
143 } rammap, ramcfg, timing;
144 int ref, div, out;
145 int from, mode;
146 int N1, M1, P;
147 int ret;
148
149 /* lookup memory config data relevant to the target frequency */
150 rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
151 &cnt, &ramcfg.size);
152 if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
153 nv_error(pfb, "invalid/missing rammap entry\n");
154 return -EINVAL;
155 }
156
157 /* locate specific data set for the attached memory */
158 if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
159 nv_error(pfb, "invalid/missing memory table\n");
160 return -EINVAL;
161 }
162
163 strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
164 data = nv_ro16(bios, M.offset + 1);
165 if (data)
166 strap = nv_ro08(bios, data + strap);
167
168 if (strap >= cnt) {
169 nv_error(pfb, "invalid ramcfg strap\n");
170 return -EINVAL;
171 }
172
173 ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
174 if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
175 nv_error(pfb, "invalid/missing ramcfg entry\n");
176 return -EINVAL;
177 }
178
179 /* lookup memory timings, if bios says they're present */
180 strap = nv_ro08(bios, ramcfg.data + 0x01);
181 if (strap != 0xff) {
182 timing.data = nvbios_timing_entry(bios, strap, &ver,
183 &timing.size);
184 if (!timing.data || ver != 0x10 || timing.size < 0x19) {
185 nv_error(pfb, "invalid/missing timing entry\n");
186 return -EINVAL;
187 }
188 } else {
189 timing.data = 0;
190 }
191
192 ret = ram_init(fuc, pfb);
193 if (ret)
194 return ret;
195
196 /* determine current mclk configuration */
197 from = !!(ram_rd32(fuc, 0x1373f0) & 0x00000002); /*XXX: ok? */
198
199 /* determine target mclk configuration */
200 if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
201 ref = clk->read(clk, nv_clk_src_sppll0);
202 else
203 ref = clk->read(clk, nv_clk_src_sppll1);
204 div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
205 out = (ref * 2) / (div + 2);
206 mode = freq != out;
207
208 ram_mask(fuc, 0x137360, 0x00000002, 0x00000000);
209
210 if ((ram_rd32(fuc, 0x132000) & 0x00000002) || 0 /*XXX*/) {
211 ram_nuke(fuc, 0x132000);
212 ram_mask(fuc, 0x132000, 0x00000002, 0x00000002);
213 ram_mask(fuc, 0x132000, 0x00000002, 0x00000000);
214 }
215
216 if (mode == 1) {
217 ram_nuke(fuc, 0x10fe20);
218 ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000002);
219 ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000000);
220 }
221
222// 0x00020034 // 0x0000000a
223 ram_wr32(fuc, 0x132100, 0x00000001);
224
225 if (mode == 1 && from == 0) {
226 /* calculate refpll */
227 ret = nva3_pll_calc(nv_subdev(pfb), &ram->refpll,
228 ram->mempll.refclk, &N1, NULL, &M1, &P);
229 if (ret <= 0) {
230 nv_error(pfb, "unable to calc refpll\n");
231 return ret ? ret : -ERANGE;
232 }
233
234 ram_wr32(fuc, 0x10fe20, 0x20010000);
235 ram_wr32(fuc, 0x137320, 0x00000003);
236 ram_wr32(fuc, 0x137330, 0x81200006);
237 ram_wr32(fuc, 0x10fe24, (P << 16) | (N1 << 8) | M1);
238 ram_wr32(fuc, 0x10fe20, 0x20010001);
239 ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
240
241 /* calculate mempll */
242 ret = nva3_pll_calc(nv_subdev(pfb), &ram->mempll, freq,
243 &N1, NULL, &M1, &P);
244 if (ret <= 0) {
245	nv_error(pfb, "unable to calc mempll\n");
246 return ret ? ret : -ERANGE;
247 }
248
249 ram_wr32(fuc, 0x10fe20, 0x20010005);
250 ram_wr32(fuc, 0x132004, (P << 16) | (N1 << 8) | M1);
251 ram_wr32(fuc, 0x132000, 0x18010101);
252 ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
253 } else
254 if (mode == 0) {
255 ram_wr32(fuc, 0x137300, 0x00000003);
256 }
257
258 if (from == 0) {
259 ram_nuke(fuc, 0x10fb04);
260 ram_mask(fuc, 0x10fb04, 0x0000ffff, 0x00000000);
261 ram_nuke(fuc, 0x10fb08);
262 ram_mask(fuc, 0x10fb08, 0x0000ffff, 0x00000000);
263 ram_wr32(fuc, 0x10f988, 0x2004ff00);
264 ram_wr32(fuc, 0x10f98c, 0x003fc040);
265 ram_wr32(fuc, 0x10f990, 0x20012001);
266 ram_wr32(fuc, 0x10f998, 0x00011a00);
267 ram_wr32(fuc, 0x13d8f4, 0x00000000);
268 } else {
269 ram_wr32(fuc, 0x10f988, 0x20010000);
270 ram_wr32(fuc, 0x10f98c, 0x00000000);
271 ram_wr32(fuc, 0x10f990, 0x20012001);
272 ram_wr32(fuc, 0x10f998, 0x00010a00);
273 }
274
275 if (from == 0) {
276// 0x00020039 // 0x000000ba
277 }
278
279// 0x0002003a // 0x00000002
280 ram_wr32(fuc, 0x100b0c, 0x00080012);
281// 0x00030014 // 0x00000000 // 0x02b5f070
282// 0x00030014 // 0x00010000 // 0x02b5f070
283 ram_wr32(fuc, 0x611200, 0x00003300);
284// 0x00020034 // 0x0000000a
285// 0x00030020 // 0x00000001 // 0x00000000
286
287 ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
288 ram_wr32(fuc, 0x10f210, 0x00000000);
289 ram_nsec(fuc, 1000);
290 if (mode == 0)
291 nvc0_ram_train(fuc, 0x000c1001);
292 ram_wr32(fuc, 0x10f310, 0x00000001);
293 ram_nsec(fuc, 1000);
294 ram_wr32(fuc, 0x10f090, 0x00000061);
295 ram_wr32(fuc, 0x10f090, 0xc000007f);
296 ram_nsec(fuc, 1000);
297
298 if (from == 0) {
299 ram_wr32(fuc, 0x10f824, 0x00007fd4);
300 } else {
301 ram_wr32(fuc, 0x1373ec, 0x00020404);
302 }
303
304 if (mode == 0) {
305 ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
306 ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
307 ram_wr32(fuc, 0x10f830, 0x41500010);
308 ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
309 ram_mask(fuc, 0x132100, 0x00000100, 0x00000100);
310 ram_wr32(fuc, 0x10f050, 0xff000090);
311 ram_wr32(fuc, 0x1373ec, 0x00020f0f);
312 ram_wr32(fuc, 0x1373f0, 0x00000003);
313 ram_wr32(fuc, 0x137310, 0x81201616);
314 ram_wr32(fuc, 0x132100, 0x00000001);
315// 0x00020039 // 0x000000ba
316 ram_wr32(fuc, 0x10f830, 0x00300017);
317 ram_wr32(fuc, 0x1373f0, 0x00000001);
318 ram_wr32(fuc, 0x10f824, 0x00007e77);
319 ram_wr32(fuc, 0x132000, 0x18030001);
320 ram_wr32(fuc, 0x10f090, 0x4000007e);
321 ram_nsec(fuc, 2000);
322 ram_wr32(fuc, 0x10f314, 0x00000001);
323 ram_wr32(fuc, 0x10f210, 0x80000000);
324 ram_wr32(fuc, 0x10f338, 0x00300220);
325 ram_wr32(fuc, 0x10f300, 0x0000011d);
326 ram_nsec(fuc, 1000);
327 ram_wr32(fuc, 0x10f290, 0x02060505);
328 ram_wr32(fuc, 0x10f294, 0x34208288);
329 ram_wr32(fuc, 0x10f298, 0x44050411);
330 ram_wr32(fuc, 0x10f29c, 0x0000114c);
331 ram_wr32(fuc, 0x10f2a0, 0x42e10069);
332 ram_wr32(fuc, 0x10f614, 0x40044f77);
333 ram_wr32(fuc, 0x10f610, 0x40044f77);
334 ram_wr32(fuc, 0x10f344, 0x00600009);
335 ram_nsec(fuc, 1000);
336 ram_wr32(fuc, 0x10f348, 0x00700008);
337 ram_wr32(fuc, 0x61c140, 0x19240000);
338 ram_wr32(fuc, 0x10f830, 0x00300017);
339 nvc0_ram_train(fuc, 0x80021001);
340 nvc0_ram_train(fuc, 0x80081001);
341 ram_wr32(fuc, 0x10f340, 0x00500004);
342 ram_nsec(fuc, 1000);
343 ram_wr32(fuc, 0x10f830, 0x01300017);
344 ram_wr32(fuc, 0x10f830, 0x00300017);
345// 0x00030020 // 0x00000000 // 0x00000000
346// 0x00020034 // 0x0000000b
347 ram_wr32(fuc, 0x100b0c, 0x00080028);
348 ram_wr32(fuc, 0x611200, 0x00003330);
349 } else {
350 ram_wr32(fuc, 0x10f800, 0x00001800);
351 ram_wr32(fuc, 0x13d8f4, 0x00000000);
352 ram_wr32(fuc, 0x1373ec, 0x00020404);
353 ram_wr32(fuc, 0x1373f0, 0x00000003);
354 ram_wr32(fuc, 0x10f830, 0x40700010);
355 ram_wr32(fuc, 0x10f830, 0x40500010);
356 ram_wr32(fuc, 0x13d8f4, 0x00000000);
357 ram_wr32(fuc, 0x1373f8, 0x00000000);
358 ram_wr32(fuc, 0x132100, 0x00000101);
359 ram_wr32(fuc, 0x137310, 0x89201616);
360 ram_wr32(fuc, 0x10f050, 0xff000090);
361 ram_wr32(fuc, 0x1373ec, 0x00030404);
362 ram_wr32(fuc, 0x1373f0, 0x00000002);
363 // 0x00020039 // 0x00000011
364 ram_wr32(fuc, 0x132100, 0x00000001);
365 ram_wr32(fuc, 0x1373f8, 0x00002000);
366 ram_nsec(fuc, 2000);
367 ram_wr32(fuc, 0x10f808, 0x7aaa0050);
368 ram_wr32(fuc, 0x10f830, 0x00500010);
369 ram_wr32(fuc, 0x10f200, 0x00ce1000);
370 ram_wr32(fuc, 0x10f090, 0x4000007e);
371 ram_nsec(fuc, 2000);
372 ram_wr32(fuc, 0x10f314, 0x00000001);
373 ram_wr32(fuc, 0x10f210, 0x80000000);
374 ram_wr32(fuc, 0x10f338, 0x00300200);
375 ram_wr32(fuc, 0x10f300, 0x0000084d);
376 ram_nsec(fuc, 1000);
377 ram_wr32(fuc, 0x10f290, 0x0b343825);
378 ram_wr32(fuc, 0x10f294, 0x3483028e);
379 ram_wr32(fuc, 0x10f298, 0x440c0600);
380 ram_wr32(fuc, 0x10f29c, 0x0000214c);
381 ram_wr32(fuc, 0x10f2a0, 0x42e20069);
382 ram_wr32(fuc, 0x10f200, 0x00ce0000);
383 ram_wr32(fuc, 0x10f614, 0x60044e77);
384 ram_wr32(fuc, 0x10f610, 0x60044e77);
385 ram_wr32(fuc, 0x10f340, 0x00500000);
386 ram_nsec(fuc, 1000);
387 ram_wr32(fuc, 0x10f344, 0x00600228);
388 ram_nsec(fuc, 1000);
389 ram_wr32(fuc, 0x10f348, 0x00700000);
390 ram_wr32(fuc, 0x13d8f4, 0x00000000);
391 ram_wr32(fuc, 0x61c140, 0x09a40000);
392
393 nvc0_ram_train(fuc, 0x800e1008);
394
395 ram_nsec(fuc, 1000);
396 ram_wr32(fuc, 0x10f800, 0x00001804);
397 // 0x00030020 // 0x00000000 // 0x00000000
398 // 0x00020034 // 0x0000000b
399 ram_wr32(fuc, 0x13d8f4, 0x00000000);
400 ram_wr32(fuc, 0x100b0c, 0x00080028);
401 ram_wr32(fuc, 0x611200, 0x00003330);
402 ram_nsec(fuc, 100000);
403 ram_wr32(fuc, 0x10f9b0, 0x05313f41);
404 ram_wr32(fuc, 0x10f9b4, 0x00002f50);
405
406 nvc0_ram_train(fuc, 0x010c1001);
407 }
408
409 ram_mask(fuc, 0x10f200, 0x00000800, 0x00000800);
410// 0x00020016 // 0x00000000
411
412 if (mode == 0)
413 ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
414 return 0;
415}
416
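The divider selection near the top of nvc0_ram_calc() is plain integer arithmetic: the chosen sppll source runs at ref, the post-divider field is clamped to [2, 65] and stored biased by two, and if no divider setting hits the target exactly the code falls back to the mempll path (mode 1). Worked through with hypothetical clock values:

#include <stdio.h>

int main(void)
{
	unsigned ref = 810000;	/* hypothetical sppll output, kHz */
	unsigned freq = 324000;	/* target mclk, kHz */
	unsigned div, out;

	div = (ref * 2) / freq;
	if (div > 65)		/* max(min(..., 65), 2) - 2 */
		div = 65;
	if (div < 2)
		div = 2;
	div -= 2;

	out = (ref * 2) / (div + 2);
	printf("div field %u -> %u kHz (%s)\n", div, out,
	       freq != out ? "need PLL path, mode 1" : "divider exact, mode 0");
	return 0;
}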
417static int
418nvc0_ram_prog(struct nouveau_fb *pfb)
419{
420 struct nouveau_device *device = nv_device(pfb);
421 struct nvc0_ram *ram = (void *)pfb->ram;
422 struct nvc0_ramfuc *fuc = &ram->fuc;
423 ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
424 return 0;
425}
426
427static void
428nvc0_ram_tidy(struct nouveau_fb *pfb)
429{
430 struct nvc0_ram *ram = (void *)pfb->ram;
431 struct nvc0_ramfuc *fuc = &ram->fuc;
432 ram_exec(fuc, false);
433}
29 434
30extern const u8 nvc0_pte_storage_type_map[256]; 435extern const u8 nvc0_pte_storage_type_map[256];
31 436
@@ -110,10 +515,9 @@ nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
110 return 0; 515 return 0;
111} 516}
112 517
113static int 518int
114nvc0_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, 519nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
115 struct nouveau_oclass *oclass, void *data, u32 size, 520 struct nouveau_oclass *oclass, int size, void **pobject)
116 struct nouveau_object **pobject)
117{ 521{
118 struct nouveau_fb *pfb = nouveau_fb(parent); 522 struct nouveau_fb *pfb = nouveau_fb(parent);
119 struct nouveau_bios *bios = nouveau_bios(pfb); 523 struct nouveau_bios *bios = nouveau_bios(pfb);
@@ -127,8 +531,8 @@ nvc0_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
127 bool uniform = true; 531 bool uniform = true;
128 int ret, part; 532 int ret, part;
129 533
130 ret = nouveau_ram_create(parent, engine, oclass, &ram); 534 ret = nouveau_ram_create_(parent, engine, oclass, size, pobject);
131 *pobject = nv_object(ram); 535 ram = *pobject;
132 if (ret) 536 if (ret)
133 return ret; 537 return ret;
134 538
@@ -182,13 +586,158 @@ nvc0_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
182 return 0; 586 return 0;
183} 587}
184 588
589static int
590nvc0_ram_init(struct nouveau_object *object)
591{
592 struct nouveau_fb *pfb = (void *)object->parent;
593 struct nvc0_ram *ram = (void *)object;
594 int ret, i;
595
596 ret = nouveau_ram_init(&ram->base);
597 if (ret)
598 return ret;
599
600 /* prepare for ddr link training, and load training patterns */
601 switch (ram->base.type) {
602 case NV_MEM_TYPE_GDDR5: {
603 static const u8 train0[] = {
604 0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
605 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
606 };
607 static const u32 train1[] = {
608 0x00000000, 0xffffffff,
609 0x55555555, 0xaaaaaaaa,
610 0x33333333, 0xcccccccc,
611 0xf0f0f0f0, 0x0f0f0f0f,
612 0x00ff00ff, 0xff00ff00,
613 0x0000ffff, 0xffff0000,
614 };
615
616 for (i = 0; i < 0x30; i++) {
617 nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
618 nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
619 nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
620 nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
621 nv_wr32(pfb, 0x10f918, train1[i % 12]);
622 nv_wr32(pfb, 0x10f91c, train1[i % 12]);
623 nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
624 nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
625 nv_wr32(pfb, 0x10f918, train1[i % 12]);
626 nv_wr32(pfb, 0x10f91c, train1[i % 12]);
627 }
628 } break;
629 default:
630 break;
631 }
632
633 return 0;
634}
635
636static int
637nvc0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
638 struct nouveau_oclass *oclass, void *data, u32 size,
639 struct nouveau_object **pobject)
640{
641 struct nouveau_bios *bios = nouveau_bios(parent);
642 struct nvc0_ram *ram;
643 int ret;
644
645 ret = nvc0_ram_create(parent, engine, oclass, &ram);
646 *pobject = nv_object(ram);
647 if (ret)
648 return ret;
649
650 ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
651 if (ret) {
652 nv_error(ram, "mclk refpll data not found\n");
653 return ret;
654 }
655
656 ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
657 if (ret) {
658 nv_error(ram, "mclk pll data not found\n");
659 return ret;
660 }
661
662 switch (ram->base.type) {
663 case NV_MEM_TYPE_GDDR5:
664 ram->base.calc = nvc0_ram_calc;
665 ram->base.prog = nvc0_ram_prog;
666 ram->base.tidy = nvc0_ram_tidy;
667 break;
668 default:
669 nv_warn(ram, "reclocking of this ram type unsupported\n");
670 return 0;
671 }
672
673 ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
674 ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
675 ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
676 ram->fuc.r_0x137330 = ramfuc_reg(0x137330);
677
678 ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
679 ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
680 ram->fuc.r_0x132100 = ramfuc_reg(0x132100);
681
682 ram->fuc.r_0x137390 = ramfuc_reg(0x137390);
683
684 ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
685 ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
686 ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
687 ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
688 ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);
689
690 ram->fuc.r_0x10f300 = ramfuc_reg(0x10f300);
691 ram->fuc.r_0x10f338 = ramfuc_reg(0x10f338);
692 ram->fuc.r_0x10f340 = ramfuc_reg(0x10f340);
693 ram->fuc.r_0x10f344 = ramfuc_reg(0x10f344);
694 ram->fuc.r_0x10f348 = ramfuc_reg(0x10f348);
695
696 ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
697 ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
698
699 ram->fuc.r_0x100b0c = ramfuc_reg(0x100b0c);
700 ram->fuc.r_0x10f050 = ramfuc_reg(0x10f050);
701 ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
702 ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
703 ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
704 ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
705 ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
706 ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
707 ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
708 ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
709 ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
710 ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
711 ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
712 ram->fuc.r_0x10f988 = ramfuc_reg(0x10f988);
713 ram->fuc.r_0x10f98c = ramfuc_reg(0x10f98c);
714 ram->fuc.r_0x10f990 = ramfuc_reg(0x10f990);
715 ram->fuc.r_0x10f998 = ramfuc_reg(0x10f998);
716 ram->fuc.r_0x10f9b0 = ramfuc_reg(0x10f9b0);
717 ram->fuc.r_0x10f9b4 = ramfuc_reg(0x10f9b4);
718 ram->fuc.r_0x10fb04 = ramfuc_reg(0x10fb04);
719 ram->fuc.r_0x10fb08 = ramfuc_reg(0x10fb08);
720	ram->fuc.r_0x137300 = ramfuc_reg(0x137300);
721 ram->fuc.r_0x137310 = ramfuc_reg(0x137310);
722 ram->fuc.r_0x137360 = ramfuc_reg(0x137360);
723 ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
724 ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
725 ram->fuc.r_0x1373f8 = ramfuc_reg(0x1373f8);
726
727 ram->fuc.r_0x61c140 = ramfuc_reg(0x61c140);
728 ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
729
730 ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
731 return 0;
732}
733
185struct nouveau_oclass 734struct nouveau_oclass
186nvc0_ram_oclass = { 735nvc0_ram_oclass = {
187 .handle = 0, 736 .handle = 0,
188 .ofuncs = &(struct nouveau_ofuncs) { 737 .ofuncs = &(struct nouveau_ofuncs) {
189 .ctor = nvc0_ram_create, 738 .ctor = nvc0_ram_ctor,
190 .dtor = _nouveau_ram_dtor, 739 .dtor = _nouveau_ram_dtor,
191 .init = _nouveau_ram_init, 740 .init = nvc0_ram_init,
192 .fini = _nouveau_ram_fini, 741 .fini = _nouveau_ram_fini,
193 } 742 }
194}; 743};
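The nvc0_ram_create() to nvc0_ram_create_() rename in this hunk follows nouveau's usual subclassing convention: the trailing-underscore constructor takes an explicit allocation size and a void ** out-pointer, so a derived object (here nve0_ram, below) can embed the base struct at offset zero and still be allocated by the base constructor. A stripped-down sketch of that pattern with made-up types:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct base { int id; };

struct derived {
	struct base base;	/* must stay first, at offset zero */
	int extra;
};

static int base_create_(size_t size, void **pobject)
{
	struct base *obj = calloc(1, size);	/* room for the subclass */

	if (!obj)
		return -ENOMEM;
	obj->id = 0xc0;
	*pobject = obj;
	return 0;
}

int main(void)
{
	void *object;
	struct derived *d;

	if (base_create_(sizeof(struct derived), &object))
		return 1;
	d = object;		/* safe: base lives at offset zero */
	d->extra = 42;
	printf("id=0x%x extra=%d\n", d->base.id, d->extra);
	free(object);
	return 0;
}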
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
new file mode 100644
index 000000000000..bc86cfd084f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -0,0 +1,1264 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/gpio.h>
26
27#include <subdev/bios.h>
28#include <subdev/bios/bit.h>
29#include <subdev/bios/pll.h>
30#include <subdev/bios/init.h>
31#include <subdev/bios/rammap.h>
32#include <subdev/bios/timing.h>
33
34#include <subdev/clock.h>
35#include <subdev/clock/pll.h>
36
37#include <subdev/timer.h>
38
39#include <core/option.h>
40
41#include "nvc0.h"
42
43#include "ramfuc.h"
44
45struct nve0_ramfuc {
46 struct ramfuc base;
47
48 struct nvbios_pll refpll;
49 struct nvbios_pll mempll;
50
51 struct ramfuc_reg r_gpioMV;
52 u32 r_funcMV[2];
53 struct ramfuc_reg r_gpio2E;
54 u32 r_func2E[2];
55 struct ramfuc_reg r_gpiotrig;
56
57 struct ramfuc_reg r_0x132020;
58 struct ramfuc_reg r_0x132028;
59 struct ramfuc_reg r_0x132024;
60 struct ramfuc_reg r_0x132030;
61 struct ramfuc_reg r_0x132034;
62 struct ramfuc_reg r_0x132000;
63 struct ramfuc_reg r_0x132004;
64 struct ramfuc_reg r_0x132040;
65
66 struct ramfuc_reg r_0x10f248;
67 struct ramfuc_reg r_0x10f290;
68 struct ramfuc_reg r_0x10f294;
69 struct ramfuc_reg r_0x10f298;
70 struct ramfuc_reg r_0x10f29c;
71 struct ramfuc_reg r_0x10f2a0;
72 struct ramfuc_reg r_0x10f2a4;
73 struct ramfuc_reg r_0x10f2a8;
74 struct ramfuc_reg r_0x10f2ac;
75 struct ramfuc_reg r_0x10f2cc;
76 struct ramfuc_reg r_0x10f2e8;
77 struct ramfuc_reg r_0x10f250;
78 struct ramfuc_reg r_0x10f24c;
79 struct ramfuc_reg r_0x10fec4;
80 struct ramfuc_reg r_0x10fec8;
81 struct ramfuc_reg r_0x10f604;
82 struct ramfuc_reg r_0x10f614;
83 struct ramfuc_reg r_0x10f610;
84 struct ramfuc_reg r_0x100770;
85 struct ramfuc_reg r_0x100778;
86 struct ramfuc_reg r_0x10f224;
87
88 struct ramfuc_reg r_0x10f870;
89 struct ramfuc_reg r_0x10f698;
90 struct ramfuc_reg r_0x10f694;
91 struct ramfuc_reg r_0x10f6b8;
92 struct ramfuc_reg r_0x10f808;
93 struct ramfuc_reg r_0x10f670;
94 struct ramfuc_reg r_0x10f60c;
95 struct ramfuc_reg r_0x10f830;
96 struct ramfuc_reg r_0x1373ec;
97 struct ramfuc_reg r_0x10f800;
98 struct ramfuc_reg r_0x10f82c;
99
100 struct ramfuc_reg r_0x10f978;
101 struct ramfuc_reg r_0x10f910;
102 struct ramfuc_reg r_0x10f914;
103
104 struct ramfuc_reg r_mr[16]; /* MR0 - MR8, MR15 */
105
106 struct ramfuc_reg r_0x62c000;
107 struct ramfuc_reg r_0x10f200;
108 struct ramfuc_reg r_0x10f210;
109 struct ramfuc_reg r_0x10f310;
110 struct ramfuc_reg r_0x10f314;
111 struct ramfuc_reg r_0x10f318;
112 struct ramfuc_reg r_0x10f090;
113 struct ramfuc_reg r_0x10f69c;
114 struct ramfuc_reg r_0x10f824;
115 struct ramfuc_reg r_0x1373f0;
116 struct ramfuc_reg r_0x1373f4;
117 struct ramfuc_reg r_0x137320;
118 struct ramfuc_reg r_0x10f65c;
119 struct ramfuc_reg r_0x10f6bc;
120 struct ramfuc_reg r_0x100710;
121 struct ramfuc_reg r_0x10f750;
122};
123
124struct nve0_ram {
125 struct nouveau_ram base;
126 struct nve0_ramfuc fuc;
127 int from;
128 int mode;
129 int N1, fN1, M1, P1;
130 int N2, M2, P2;
131};
132
133/*******************************************************************************
134 * GDDR5
135 ******************************************************************************/
136static void
137train(struct nve0_ramfuc *fuc, u32 magic)
138{
139 struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
140 struct nouveau_fb *pfb = nouveau_fb(ram);
141 const int mc = nv_rd32(pfb, 0x02243c);
142 int i;
143
144 ram_mask(fuc, 0x10f910, 0xbc0e0000, magic);
145 ram_mask(fuc, 0x10f914, 0xbc0e0000, magic);
146 for (i = 0; i < mc; i++) {
147 const u32 addr = 0x110974 + (i * 0x1000);
148 ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
149 }
150}
151
152static void
153r1373f4_init(struct nve0_ramfuc *fuc)
154{
155 struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
156 const u32 mcoef = ((--ram->P2 << 28) | (ram->N2 << 8) | ram->M2);
157 const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
158 const u32 runk0 = ram->fN1 << 16;
159 const u32 runk1 = ram->fN1;
160
161 if (ram->from == 2) {
162 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00001100);
163 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00000010);
164 } else {
165 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
166 }
167
168 ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
169 ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
170
171 /* (re)program refpll, if required */
172 if ((ram_rd32(fuc, 0x132024) & 0xffffffff) != rcoef ||
173 (ram_rd32(fuc, 0x132034) & 0x0000ffff) != runk1) {
174 ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
175 ram_mask(fuc, 0x132020, 0x00000001, 0x00000000);
176 ram_wr32(fuc, 0x137320, 0x00000000);
177 ram_mask(fuc, 0x132030, 0xffff0000, runk0);
178 ram_mask(fuc, 0x132034, 0x0000ffff, runk1);
179 ram_wr32(fuc, 0x132024, rcoef);
180 ram_mask(fuc, 0x132028, 0x00080000, 0x00080000);
181 ram_mask(fuc, 0x132020, 0x00000001, 0x00000001);
182 ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
183 ram_mask(fuc, 0x132028, 0x00080000, 0x00000000);
184 }
185
186 /* (re)program mempll, if required */
187 if (ram->mode == 2) {
188 ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
189 ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
190 ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
191 ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
192 ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
193 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00001100);
194 } else {
195 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010100);
196 }
197
198 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00000010);
199}
200
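For reference, r1373f4_init() packs the PLL coefficients as (P << 16) | (N << 8) | M for the refpll word (0x132024) and (P << 28) | (N << 8) | M for the mempll word (0x132004); note the pre-decrement of ram->P2 above, which suggests the mempll post-divider field is programmed biased by one, though that is an inference from the code rather than documented behaviour. The packing itself, with hypothetical coefficients:

#include <stdio.h>

int main(void)
{
	int N = 0x4b, M = 0x01, P = 0x01;	/* hypothetical coefficients */
	unsigned rcoef = ((unsigned)P << 16) | (N << 8) | M;
	unsigned mcoef = ((unsigned)P << 28) | (N << 8) | M;

	printf("rcoef=0x%08x mcoef=0x%08x\n", rcoef, mcoef);
	return 0;
}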
201static void
202r1373f4_fini(struct nve0_ramfuc *fuc, u32 ramcfg)
203{
204 struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
205 struct nouveau_bios *bios = nouveau_bios(ram);
206 u8 v0 = (nv_ro08(bios, ramcfg + 0x03) & 0xc0) >> 6;
207 u8 v1 = (nv_ro08(bios, ramcfg + 0x03) & 0x30) >> 4;
208 u32 tmp;
209
210 tmp = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
211 ram_wr32(fuc, 0x1373ec, tmp | (v1 << 16));
212 ram_mask(fuc, 0x1373f0, (~ram->mode & 3), 0x00000000);
213 if (ram->mode == 2) {
214	ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000002);
215	ram_mask(fuc, 0x1373f4, 0x00001100, 0x00000000);
216	} else {
217	ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000001);
218	ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
219 }
220 ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4);
221}
222
223static int
224nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
225{
226 struct nouveau_bios *bios = nouveau_bios(pfb);
227 struct nve0_ram *ram = (void *)pfb->ram;
228 struct nve0_ramfuc *fuc = &ram->fuc;
229 const u32 rammap = ram->base.rammap.data;
230 const u32 ramcfg = ram->base.ramcfg.data;
231 const u32 timing = ram->base.timing.data;
232 int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
233 int mv = 1; /*XXX*/
234 u32 mask, data;
235
236 ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
237 ram_wr32(fuc, 0x62c000, 0x0f0f0000);
238
239 /* MR1: turn termination on early, for some reason.. */
240 if ((ram->base.mr[1] & 0x03c) != 0x030)
241 ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c);
242
243 if (vc == 1 && ram_have(fuc, gpio2E)) {
244 u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
245 if (temp != ram_rd32(fuc, gpio2E)) {
246 ram_wr32(fuc, gpiotrig, 1);
247 ram_nsec(fuc, 20000);
248 }
249 }
250
251 ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
252
253 ram_mask(fuc, 0x10f914, 0x01020000, 0x000c0000);
254 ram_mask(fuc, 0x10f910, 0x01020000, 0x000c0000);
255
256 ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
257 ram_nsec(fuc, 1000);
258 ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
259 ram_nsec(fuc, 1000);
260
261 ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
262 ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
263 ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
264 ram_wr32(fuc, 0x10f090, 0x00000061);
265 ram_wr32(fuc, 0x10f090, 0xc000007f);
266 ram_nsec(fuc, 1000);
267
268 ram_wr32(fuc, 0x10f698, 0x00000000);
269 ram_wr32(fuc, 0x10f69c, 0x00000000);
270
271 /*XXX: there does appear to be some kind of condition here, simply
272 * modifying these bits in the vbios from the default pl0
273 * entries shows no change. however, the data does appear to
274 * be correct and may be required for the transition back
275 */
276 mask = 0x800f07e0;
277 data = 0x00030000;
278 if (ram_rd32(fuc, 0x10f978) & 0x00800000)
279 data |= 0x00040000;
280
281 if (1) {
282 data |= 0x800807e0;
283 switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
284 case 0xc0: data &= ~0x00000040; break;
285 case 0x80: data &= ~0x00000100; break;
286 case 0x40: data &= ~0x80000000; break;
287 case 0x00: data &= ~0x00000400; break;
288 }
289
290 switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
291 case 0x30: data &= ~0x00000020; break;
292 case 0x20: data &= ~0x00000080; break;
293 case 0x10: data &= ~0x00080000; break;
294 case 0x00: data &= ~0x00000200; break;
295 }
296 }
297
298 if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
299 mask |= 0x03000000;
300 if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
301 mask |= 0x00002000;
302 if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
303 mask |= 0x00004000;
304 if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
305 mask |= 0x00000003;
306 else {
307 mask |= 0x34000000;
308 if (ram_rd32(fuc, 0x10f978) & 0x00800000)
309 mask |= 0x40000000;
310 }
311 ram_mask(fuc, 0x10f824, mask, data);
312
313 ram_mask(fuc, 0x132040, 0x00010000, 0x00000000);
314
315 if (ram->from == 2 && ram->mode != 2) {
316 ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
317 ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
318 ram_mask(fuc, 0x10f800, 0x00000000, 0x00000004);
319 ram_mask(fuc, 0x10f830, 0x00008000, 0x01040010);
320 ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
321 r1373f4_init(fuc);
322 ram_mask(fuc, 0x1373f0, 0x00000002, 0x00000001);
323 r1373f4_fini(fuc, ramcfg);
324 ram_mask(fuc, 0x10f830, 0x00c00000, 0x00240001);
325 } else
326 if (ram->from != 2 && ram->mode != 2) {
327 r1373f4_init(fuc);
328 r1373f4_fini(fuc, ramcfg);
329 }
330
331 if (ram_have(fuc, gpioMV)) {
332 u32 temp = ram_mask(fuc, gpioMV, 0x3000, fuc->r_funcMV[mv]);
333 if (temp != ram_rd32(fuc, gpioMV)) {
334 ram_wr32(fuc, gpiotrig, 1);
335 ram_nsec(fuc, 64000);
336 }
337 }
338
339 if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
340 (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
341 ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
342 ram_nsec(fuc, 20000);
343 }
344
345 if (ram->from != 2 && ram->mode == 2) {
346 ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
347 ram_mask(fuc, 0x1373f0, 0x00000000, 0x00000002);
348 ram_mask(fuc, 0x10f830, 0x00800001, 0x00408010);
349 r1373f4_init(fuc);
350 r1373f4_fini(fuc, ramcfg);
351 ram_mask(fuc, 0x10f808, 0x00000000, 0x00080000);
352 ram_mask(fuc, 0x10f200, 0x00808000, 0x00800000);
353 } else
354 if (ram->from == 2 && ram->mode == 2) {
355 ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
356 r1373f4_init(fuc);
357 r1373f4_fini(fuc, ramcfg);
358 }
359
360 if (ram->mode != 2) /*XXX*/ {
361 if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
362 ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
363 }
364
365 data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
366 ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
367 ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
368 ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
369
370 data = nv_ro08(bios, ramcfg + 0x04);
371 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
372 ram_wr32(fuc, 0x10f698, 0x01010101 * data);
373 ram_wr32(fuc, 0x10f69c, 0x01010101 * data);
374 }
375
376 if (ram->mode != 2) {
377 u32 temp = ram_rd32(fuc, 0x10f694) & ~0xff00ff00;
378 ram_wr32(fuc, 0x10f694, temp | (0x01000100 * data));
379 }
380
381 if (ram->mode == 2 && (nv_ro08(bios, ramcfg + 0x08) & 0x10))
382 data = 0x00000080;
383 else
384 data = 0x00000000;
385 ram_mask(fuc, 0x10f60c, 0x00000080, data);
386
387 mask = 0x00070000;
388 data = 0x00000000;
389 if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
390 data |= 0x03000000;
391 if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
392 data |= 0x00002000;
393 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
394 data |= 0x00004000;
395 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
396 data |= 0x00000003;
397 else
398 data |= 0x74000000;
399 ram_mask(fuc, 0x10f824, mask, data);
400
401 if (nv_ro08(bios, ramcfg + 0x01) & 0x08)
402 data = 0x00000000;
403 else
404 data = 0x00001000;
405 ram_mask(fuc, 0x10f200, 0x00001000, data);
406
407 if (ram_rd32(fuc, 0x10f670) & 0x80000000) {
408 ram_nsec(fuc, 10000);
409 ram_mask(fuc, 0x10f670, 0x80000000, 0x00000000);
410 }
411
412 if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
413 data = 0x00100000;
414 else
415 data = 0x00000000;
416 ram_mask(fuc, 0x10f82c, 0x00100000, data);
417
418 data = 0x00000000;
419 if (nv_ro08(bios, ramcfg + 0x08) & 0x08)
420 data |= 0x00002000;
421 if (nv_ro08(bios, ramcfg + 0x08) & 0x04)
422 data |= 0x00001000;
423 if (nv_ro08(bios, ramcfg + 0x08) & 0x02)
424 data |= 0x00004000;
425 ram_mask(fuc, 0x10f830, 0x00007000, data);
426
427 /* PFB timing */
428 ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
429 ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
430 ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
431 ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
432 ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
433 ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
434 ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
435 ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
436 ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
437 ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
438 ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
439
440 data = (nv_ro08(bios, ramcfg + 0x02) & 0x03) << 8;
441 if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
442 data |= 0x70000000;
443 ram_mask(fuc, 0x10f604, 0x70000300, data);
444
445 data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
446 if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
447 data |= 0x00000100;
448 ram_mask(fuc, 0x10f614, 0x70000000, data);
449
450 data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
451 if (nv_ro08(bios, ramcfg + 0x01) & 0x02)
452 data |= 0x00000100;
453 ram_mask(fuc, 0x10f610, 0x70000000, data);
454
455 mask = 0x33f00000;
456 data = 0x00000000;
457 if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
458 data |= 0x20200000;
459 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
460 data |= 0x12800000;
461 /*XXX: see note above about there probably being some condition
462 * for the 10f824 stuff that uses ramcfg 3...
463 */
464 if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
465 if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
466 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
467 mask |= 0x00000020;
468 else
469 data |= 0x00000020;
470 mask |= 0x00000004;
471 }
472 } else {
473 mask |= 0x40000020;
474 data |= 0x00000004;
475 }
476
477 ram_mask(fuc, 0x10f808, mask, data);
478
479 data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
480 ram_wr32(fuc, 0x10f870, 0x11111111 * data);
481
482 data = nv_ro08(bios, ramcfg + 0x02) & 0x03;
483 if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
484 data |= 0x00000004;
485 if ((nv_rd32(bios, 0x100770) & 0x00000004) != (data & 0x00000004)) {
486 ram_wr32(fuc, 0x10f750, 0x04000009);
487 ram_wr32(fuc, 0x100710, 0x00000000);
488 ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
489 }
490 ram_mask(fuc, 0x100770, 0x00000007, data);
491
492 data = (nv_ro08(bios, timing + 0x30) & 0x07) << 8;
493 if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
494 data |= 0x80000000;
495 ram_mask(fuc, 0x100778, 0x00000700, data);
496
497 data = nv_ro16(bios, timing + 0x2c);
498 ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) << 4);
499 ram_mask(fuc, 0x10f24c, 0x7f000000, (data & 0x1fc0) << 18);
500
501 data = nv_ro08(bios, timing + 0x30);
502 ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
503
504 data = nv_ro16(bios, timing + 0x31);
505 ram_mask(fuc, 0x10fec4, 0x041e0f07, (data & 0x0800) << 15 |
506 (data & 0x0780) << 10 |
507 (data & 0x0078) << 5 |
508 (data & 0x0007));
509 ram_mask(fuc, 0x10fec8, 0x00000027, (data & 0x8000) >> 10 |
510 (data & 0x7000) >> 12);
511
512 ram_wr32(fuc, 0x10f090, 0x4000007e);
513 ram_nsec(fuc, 1000);
514 ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
515 ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
516 ram_nsec(fuc, 2000);
517 ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
518
519 if ((nv_ro08(bios, ramcfg + 0x08) & 0x10) && (ram->mode == 2) /*XXX*/) {
520 u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
521 train(fuc, 0xa4010000); /*XXX*/
522 ram_nsec(fuc, 1000);
523 ram_wr32(fuc, 0x10f294, temp);
524 }
525
526 ram_mask(fuc, mr[3], 0xfff, ram->base.mr[3]);
527 ram_wr32(fuc, mr[0], ram->base.mr[0]);
528 ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]);
529 ram_nsec(fuc, 1000);
530 ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]);
531 ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5]);
532 ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]);
533 ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]);
534
535 if (vc == 0 && ram_have(fuc, gpio2E)) {
536 u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
537 if (temp != ram_rd32(fuc, gpio2E)) {
538 ram_wr32(fuc, gpiotrig, 1);
539 ram_nsec(fuc, 20000);
540 }
541 }
542
543 ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
544 ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
545 ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
546 ram_nsec(fuc, 1000);
547
548 data = ram_rd32(fuc, 0x10f978);
549 data &= ~0x00046144;
550 data |= 0x0000000b;
551 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
552 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x04))
553 data |= 0x0000200c;
554 else
555 data |= 0x00000000;
556 } else {
557 data |= 0x00040044;
558 }
559 ram_wr32(fuc, 0x10f978, data);
560
561 if (ram->mode == 1) {
562 data = ram_rd32(fuc, 0x10f830) | 0x00000001;
563 ram_wr32(fuc, 0x10f830, data);
564 }
565
566 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
567 data = 0x88020000;
568 if ( (nv_ro08(bios, ramcfg + 0x07) & 0x04))
569 data |= 0x10000000;
570 if (!(nv_ro08(bios, rammap + 0x08) & 0x10))
571 data |= 0x00080000;
572 } else {
573 data = 0xa40e0000;
574 }
575 train(fuc, data);
576 ram_nsec(fuc, 1000);
577
578 if (ram->mode == 2) { /*XXX*/
579 ram_mask(fuc, 0x10f800, 0x00000004, 0x00000004);
580 }
581
582 /* MR5: (re)enable LP3 if necessary
583 * XXX: need to find the switch, keeping off for now
584 */
585 ram_mask(fuc, mr[5], 0x00000004, 0x00000000);
586
587 if (ram->mode != 2) {
588 ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
589 ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
590 }
591
592 if (nv_ro08(bios, ramcfg + 0x07) & 0x02) {
593 ram_mask(fuc, 0x10f910, 0x80020000, 0x01000000);
594 ram_mask(fuc, 0x10f914, 0x80020000, 0x01000000);
595 }
596
597 ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
598
599 if (nv_ro08(bios, rammap + 0x08) & 0x01)
600 data = 0x00000800;
601 else
602 data = 0x00000000;
603 ram_mask(fuc, 0x10f200, 0x00000800, data);
604 return 0;
605}
606
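One idiom in the GDDR5 path above is worth calling out: ram_mask() evidently returns the register's previous value, so the gpio2E/gpioMV blocks compare that against a fresh read to detect whether the voltage GPIO actually changed, and only then pulse gpiotrig and wait. A stubbed illustration of that read-modify-write-and-report shape (the register file here is a plain array, not hardware):

#include <stdint.h>
#include <stdio.h>

static uint32_t regfile[1] = { 0x00001000 };

static uint32_t reg_rd(int r)
{
	return regfile[r];
}

static void reg_wr(int r, uint32_t v)
{
	regfile[r] = v;
}

/* like ram_mask(): apply (value & ~mask) | data, return the old value */
static uint32_t reg_mask(int r, uint32_t mask, uint32_t data)
{
	uint32_t old = reg_rd(r);

	reg_wr(r, (old & ~mask) | data);
	return old;
}

int main(void)
{
	uint32_t temp = reg_mask(0, 0x3000, 0x2000);

	if (temp != reg_rd(0))
		printf("value changed: pulse the GPIO trigger, then settle\n");
	return 0;
}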
607/*******************************************************************************
608 * DDR3
609 ******************************************************************************/
610
611static int
612nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
613{
614 struct nouveau_bios *bios = nouveau_bios(pfb);
615 struct nve0_ram *ram = (void *)pfb->ram;
616 struct nve0_ramfuc *fuc = &ram->fuc;
617 const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
618 const u32 runk0 = ram->fN1 << 16;
619 const u32 runk1 = ram->fN1;
620 const u32 rammap = ram->base.rammap.data;
621 const u32 ramcfg = ram->base.ramcfg.data;
622 const u32 timing = ram->base.timing.data;
623 int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
624 int mv = 1; /*XXX*/
625 u32 mask, data;
626
627 ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
628 ram_wr32(fuc, 0x62c000, 0x0f0f0000);
629
630 if (vc == 1 && ram_have(fuc, gpio2E)) {
631 u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
632 if (temp != ram_rd32(fuc, gpio2E)) {
633 ram_wr32(fuc, gpiotrig, 1);
634 ram_nsec(fuc, 20000);
635 }
636 }
637
638 ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
639 if ((nv_ro08(bios, ramcfg + 0x03) & 0xf0))
640 ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
641
642 ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
643 ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
644 ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
645 ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
646 ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
647 ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
648 ram_nsec(fuc, 1000);
649
650 ram_wr32(fuc, 0x10f090, 0x00000060);
651 ram_wr32(fuc, 0x10f090, 0xc000007e);
652
653 /*XXX: there does appear to be some kind of condition here, simply
654 * modifying these bits in the vbios from the default pl0
655 * entries shows no change. however, the data does appear to
656 * be correct and may be required for the transition back
657 */
658 mask = 0x00010000;
659 data = 0x00010000;
660
661 if (1) {
662 mask |= 0x800807e0;
663 data |= 0x800807e0;
664 switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
665 case 0xc0: data &= ~0x00000040; break;
666 case 0x80: data &= ~0x00000100; break;
667 case 0x40: data &= ~0x80000000; break;
668 case 0x00: data &= ~0x00000400; break;
669 }
670
671 switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
672 case 0x30: data &= ~0x00000020; break;
673 case 0x20: data &= ~0x00000080; break;
674 case 0x10: data &= ~0x00080000; break;
675 case 0x00: data &= ~0x00000200; break;
676 }
677 }
678
679 if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
680 mask |= 0x03000000;
681 if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
682 mask |= 0x00002000;
683 if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
684 mask |= 0x00004000;
685 if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
686 mask |= 0x00000003;
687 else
688 mask |= 0x14000000;
689 ram_mask(fuc, 0x10f824, mask, data);
690
691 ram_mask(fuc, 0x132040, 0x00010000, 0x00000000);
692
693 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
694 data = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
695 data |= (nv_ro08(bios, ramcfg + 0x03) & 0x30) << 12;
696 ram_wr32(fuc, 0x1373ec, data);
697 ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
698 ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
699
700 /* (re)program refpll, if required */
701 if ((ram_rd32(fuc, 0x132024) & 0xffffffff) != rcoef ||
702 (ram_rd32(fuc, 0x132034) & 0x0000ffff) != runk1) {
703 ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
704 ram_mask(fuc, 0x132020, 0x00000001, 0x00000000);
705 ram_wr32(fuc, 0x137320, 0x00000000);
706 ram_mask(fuc, 0x132030, 0xffff0000, runk0);
707 ram_mask(fuc, 0x132034, 0x0000ffff, runk1);
708 ram_wr32(fuc, 0x132024, rcoef);
709 ram_mask(fuc, 0x132028, 0x00080000, 0x00080000);
710 ram_mask(fuc, 0x132020, 0x00000001, 0x00000001);
711 ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
712 ram_mask(fuc, 0x132028, 0x00080000, 0x00000000);
713 }
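	/* [editor's note] not part of the patch: a minimal sketch of the
	 * rcoef packing used above, and of the refclk * N / (M * P)
	 * relationship that nva3_pll_calc() solves for. helper names are
	 * hypothetical, and the fractional fN1 term is ignored.
	 */
	static u32
	pack_refpll_coef(u8 P1, u8 N1, u8 M1)
	{
		/* matches: rcoef = ((ram->P1 << 16) | (ram->N1 << 8) | ram->M1) */
		return ((u32)P1 << 16) | ((u32)N1 << 8) | M1;
	}

	static u32
	refpll_output_khz(u32 parent_khz, u8 N1, u8 M1, u8 P1)
	{
		/* assumed PLL model: parent * N / (M * P) */
		return (parent_khz * N1) / (M1 * P1);
	}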
714
715 ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000010);
716 ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000001);
717 ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
718
719 if (ram_have(fuc, gpioMV)) {
720 u32 temp = ram_mask(fuc, gpioMV, 0x3000, fuc->r_funcMV[mv]);
721 if (temp != ram_rd32(fuc, gpioMV)) {
722 ram_wr32(fuc, gpiotrig, 1);
723 ram_nsec(fuc, 64000);
724 }
725 }
726
727 if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
728 (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
729 ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
730 ram_nsec(fuc, 20000);
731 }
732
733 if (ram->mode != 2) /*XXX*/ {
734 if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
735 ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
736 }
737
738 data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
739 ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
740 ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
741 ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
742
743 mask = 0x00010000;
744 data = 0x00000000;
745 if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
746 data |= 0x03000000;
747 if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
748 data |= 0x00002000;
749 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
750 data |= 0x00004000;
751 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
752 data |= 0x00000003;
753 else
754 data |= 0x14000000;
755 ram_mask(fuc, 0x10f824, mask, data);
756 ram_nsec(fuc, 1000);
757
758 if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
759 data = 0x00100000;
760 else
761 data = 0x00000000;
762 ram_mask(fuc, 0x10f82c, 0x00100000, data);
763
764 /* PFB timing */
765 ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
766 ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
767 ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
768 ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
769 ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
770 ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
771 ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
772 ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
773 ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
774 ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
775 ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
776
777 mask = 0x33f00000;
778 data = 0x00000000;
779 if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
780 data |= 0x20200000;
781 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
782 data |= 0x12800000;
783 /*XXX: see note above about there probably being some condition
784 * for the 10f824 stuff that uses ramcfg 3...
785 */
786 if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
787 if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
788 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
789 mask |= 0x00000020;
790 else
791 data |= 0x00000020;
792 mask |= 0x08000004;
793 }
794 data |= 0x04000000;
795 } else {
796 mask |= 0x44000020;
797 data |= 0x08000004;
798 }
799
800 ram_mask(fuc, 0x10f808, mask, data);
801
802 data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
803 ram_wr32(fuc, 0x10f870, 0x11111111 * data);
804
805 data = nv_ro16(bios, timing + 0x2c);
806 ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) << 4);
807
808 if (((nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >> 6) >
809 ((nv_ro32(bios, timing + 0x28) & 0x7f000000) >> 24))
810 data = (nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >> 6;
811 else
812 data = (nv_ro32(bios, timing + 0x28) & 0x1f000000) >> 24;
813 ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
814
815 data = nv_ro08(bios, timing + 0x30);
816 ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
817
818 ram_wr32(fuc, 0x10f090, 0x4000007f);
819 ram_nsec(fuc, 1000);
820
821 ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
822 ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
823 ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
824 ram_nsec(fuc, 1000);
825
826 ram_nuke(fuc, mr[0]);
827 ram_mask(fuc, mr[0], 0x100, 0x100);
828 ram_mask(fuc, mr[0], 0x100, 0x000);
829
830 ram_mask(fuc, mr[2], 0xfff, ram->base.mr[2]);
831 ram_wr32(fuc, mr[0], ram->base.mr[0]);
832 ram_nsec(fuc, 1000);
833
834 ram_nuke(fuc, mr[0]);
835 ram_mask(fuc, mr[0], 0x100, 0x100);
836 ram_mask(fuc, mr[0], 0x100, 0x000);
837
838 if (vc == 0 && ram_have(fuc, gpio2E)) {
839 u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
840 if (temp != ram_rd32(fuc, gpio2E)) {
841 ram_wr32(fuc, gpiotrig, 1);
842 ram_nsec(fuc, 20000);
843 }
844 }
845
846 if (ram->mode != 2) {
847 ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
848 ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
849 }
850
851 ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
852 ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
853 ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
854 ram_nsec(fuc, 1000);
855
856 ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
857
858 if (nv_ro08(bios, rammap + 0x08) & 0x01)
859 data = 0x00000800;
860 else
861 data = 0x00000000;
862 ram_mask(fuc, 0x10f200, 0x00000800, data);
863 return 0;
864}
865
866/*******************************************************************************
867 * main hooks
868 ******************************************************************************/
869
870static int
871nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
872{
873 struct nouveau_bios *bios = nouveau_bios(pfb);
874 struct nve0_ram *ram = (void *)pfb->ram;
875 struct nve0_ramfuc *fuc = &ram->fuc;
876 struct bit_entry M;
877 int ret, refclk, strap, i;
878 u32 data;
879 u8 cnt;
880
881 /* lookup memory config data relevant to the target frequency */
882 ram->base.rammap.data = nvbios_rammap_match(bios, freq / 1000,
883 &ram->base.rammap.version,
884 &ram->base.rammap.size, &cnt,
885 &ram->base.ramcfg.size);
886 if (!ram->base.rammap.data || ram->base.rammap.version != 0x11 ||
887 ram->base.rammap.size < 0x09) {
888 nv_error(pfb, "invalid/missing rammap entry\n");
889 return -EINVAL;
890 }
891
892 /* locate specific data set for the attached memory */
893 if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
894 nv_error(pfb, "invalid/missing memory table\n");
895 return -EINVAL;
896 }
897
898 strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
899 data = nv_ro16(bios, M.offset + 1);
900 if (data)
901 strap = nv_ro08(bios, data + strap);
902
903 if (strap >= cnt) {
904 nv_error(pfb, "invalid ramcfg strap\n");
905 return -EINVAL;
906 }
907
908 ram->base.ramcfg.version = ram->base.rammap.version;
909 ram->base.ramcfg.data = ram->base.rammap.data + ram->base.rammap.size +
910 (ram->base.ramcfg.size * strap);
911 if (!ram->base.ramcfg.data || ram->base.ramcfg.version != 0x11 ||
912 ram->base.ramcfg.size < 0x08) {
913 nv_error(pfb, "invalid/missing ramcfg entry\n");
914 return -EINVAL;
915 }
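	/* [editor's note] worked example of the addressing above, with
	 * made-up numbers: if rammap.data = 0x1000, rammap.size = 0x0b,
	 * ramcfg.size = 0x0a and strap = 2, the selected entry sits at
	 *
	 *   0x1000 + 0x0b + (0x0a * 2) = 0x101f
	 *
	 * i.e. the cnt ramcfg sub-entries immediately follow their
	 * rammap entry, and the strap indexes into them.
	 */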
916
917 /* lookup memory timings, if bios says they're present */
918 strap = nv_ro08(bios, ram->base.ramcfg.data + 0x00);
919 if (strap != 0xff) {
920 ram->base.timing.data =
921 nvbios_timing_entry(bios, strap,
922 &ram->base.timing.version,
923 &ram->base.timing.size);
924 if (!ram->base.timing.data ||
925 ram->base.timing.version != 0x20 ||
926 ram->base.timing.size < 0x33) {
927 nv_error(pfb, "invalid/missing timing entry\n");
928 return -EINVAL;
929 }
930 } else {
931 ram->base.timing.data = 0;
932 }
933
934 ret = ram_init(fuc, pfb);
935 if (ret)
936 return ret;
937
938 ram->mode = (freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
939 ram->from = ram_rd32(fuc, 0x1373f4) & 0x0000000f;
940
941 /* XXX: this is *not* what nvidia do. on fermi nvidia generally
942 * select, based on some unknown condition, one of the two possible
943 * reference frequencies listed in the vbios table for mempll and
944 * program refpll to that frequency.
945 *
946 * so far, i've seen very weird values being chosen by nvidia on
947	 * kepler boards; no idea how/why they're chosen.
948 */
949 refclk = freq;
950 if (ram->mode == 2)
951 refclk = fuc->mempll.refclk;
952
953 /* calculate refpll coefficients */
954 ret = nva3_pll_calc(nv_subdev(pfb), &fuc->refpll, refclk, &ram->N1,
955 &ram->fN1, &ram->M1, &ram->P1);
956 fuc->mempll.refclk = ret;
957 if (ret <= 0) {
958 nv_error(pfb, "unable to calc refpll\n");
959 return -EINVAL;
960 }
961
962 /* calculate mempll coefficients, if we're using it */
963 if (ram->mode == 2) {
964		/* post-divider doesn't work... the reg takes the value but
965		 * appears to completely ignore it. there *is* a bit,
966		 * bit 28, that appears to divide the clock by 2 if set.
967 */
968 fuc->mempll.min_p = 1;
969 fuc->mempll.max_p = 2;
970
971 ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, freq,
972 &ram->N2, NULL, &ram->M2, &ram->P2);
973 if (ret <= 0) {
974 nv_error(pfb, "unable to calc mempll\n");
975 return -EINVAL;
976 }
977 }
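	/* [editor's note] a rough sketch, not part of the patch: in mode 2
	 * the final memory clock is the product of both stages, refpll
	 * feeding mempll. assumes both PLLs follow the same
	 * parent * N / (M * P) model; the helper name is hypothetical.
	 */
	static u32
	mode2_mclk_khz(u32 crystal_khz, u8 N1, u8 M1, u8 P1,
		       u8 N2, u8 M2, u8 P2)
	{
		u32 refclk_khz = (crystal_khz * N1) / (M1 * P1);
		return (refclk_khz * N2) / (M2 * P2);
	}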
978
979 for (i = 0; i < ARRAY_SIZE(fuc->r_mr); i++) {
980 if (ram_have(fuc, mr[i]))
981 ram->base.mr[i] = ram_rd32(fuc, mr[i]);
982 }
983
984 switch (ram->base.type) {
985 case NV_MEM_TYPE_DDR3:
986 ret = nouveau_sddr3_calc(&ram->base);
987 if (ret == 0)
988 ret = nve0_ram_calc_sddr3(pfb, freq);
989 break;
990 case NV_MEM_TYPE_GDDR5:
991 ret = nouveau_gddr5_calc(&ram->base);
992 if (ret == 0)
993 ret = nve0_ram_calc_gddr5(pfb, freq);
994 break;
995 default:
996 ret = -ENOSYS;
997 break;
998 }
999
1000 return ret;
1001}
1002
1003static int
1004nve0_ram_prog(struct nouveau_fb *pfb)
1005{
1006 struct nouveau_device *device = nv_device(pfb);
1007 struct nve0_ram *ram = (void *)pfb->ram;
1008 struct nve0_ramfuc *fuc = &ram->fuc;
1009 ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
1010 return 0;
1011}
1012
1013static void
1014nve0_ram_tidy(struct nouveau_fb *pfb)
1015{
1016 struct nve0_ram *ram = (void *)pfb->ram;
1017 struct nve0_ramfuc *fuc = &ram->fuc;
1018 ram_exec(fuc, false);
1019}
1020
1021static int
1022nve0_ram_init(struct nouveau_object *object)
1023{
1024 struct nouveau_fb *pfb = (void *)object->parent;
1025 struct nve0_ram *ram = (void *)object;
1026 struct nouveau_bios *bios = nouveau_bios(pfb);
1027 static const u8 train0[] = {
1028 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
1029 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
1030 };
1031 static const u32 train1[] = {
1032 0x00000000, 0xffffffff,
1033 0x55555555, 0xaaaaaaaa,
1034 0x33333333, 0xcccccccc,
1035 0xf0f0f0f0, 0x0f0f0f0f,
1036 0x00ff00ff, 0xff00ff00,
1037 0x0000ffff, 0xffff0000,
1038 };
1039 u8 ver, hdr, cnt, len, snr, ssz;
1040 u32 data, save;
1041 int ret, i;
1042
1043 ret = nouveau_ram_init(&ram->base);
1044 if (ret)
1045 return ret;
1046
1047	/* run a bunch of scripts from the rammap table. there are actually
1048	 * individual pointers for each rammap entry too, but nvidia
1049	 * seem to just run the last two entries' scripts early on in
1050	 * their init, and never again. we'll just run 'em all once
1051	 * for now.
1052	 *
1053	 * i strongly suspect that each script is for a separate mode
1054	 * (likely selected by 0x10f65c's lower bits?), and the
1055	 * binary driver skips the one that's already been set up by
1056 * the init tables.
1057 */
1058 data = nvbios_rammap_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
1059 if (!data || hdr < 0x15)
1060 return -EINVAL;
1061
1062 cnt = nv_ro08(bios, data + 0x14); /* guess at count */
1063 data = nv_ro32(bios, data + 0x10); /* guess u32... */
1064 save = nv_rd32(pfb, 0x10f65c);
1065 for (i = 0; i < cnt; i++) {
1066 nv_mask(pfb, 0x10f65c, 0x000000f0, i << 4);
1067 nvbios_exec(&(struct nvbios_init) {
1068 .subdev = nv_subdev(pfb),
1069 .bios = bios,
1070 .offset = nv_ro32(bios, data), /* guess u32 */
1071 .execute = 1,
1072 });
1073 data += 4;
1074 }
1075 nv_wr32(pfb, 0x10f65c, save);
1076
1077 switch (ram->base.type) {
1078 case NV_MEM_TYPE_GDDR5:
1079 for (i = 0; i < 0x30; i++) {
1080 nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
1081 nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
1082 nv_wr32(pfb, 0x10f918, train1[i % 12]);
1083 nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
1084 nv_wr32(pfb, 0x10f918, train1[i % 12]);
1085
1086 nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
1087 nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
1088 nv_wr32(pfb, 0x10f91c, train1[i % 12]);
1089 nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
1090 nv_wr32(pfb, 0x10f91c, train1[i % 12]);
1091 }
1092
1093 for (i = 0; i < 0x100; i++) {
1094 nv_wr32(pfb, 0x10f968, i);
1095 nv_wr32(pfb, 0x10f900, train1[2 + (i & 1)]);
1096 }
1097
1098 for (i = 0; i < 0x100; i++) {
1099 nv_wr32(pfb, 0x10f96c, i);
1100 nv_wr32(pfb, 0x10f900, train1[2 + (i & 1)]);
1101 }
1102 break;
1103 default:
1104 break;
1105 }
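	/* [editor's note] train1[] holds classic DQ stress patterns: all
	 * zeroes/ones, 0x5/0xa and 0x3/0xc checkerboards, then nibble,
	 * byte and halfword walks. train0[] alternates 0x00/0xff across
	 * the same 12-entry cycle; its exact meaning to the hardware is
	 * undocumented here.
	 */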
1106
1107 return 0;
1108}
1109
1110static int
1111nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1112 struct nouveau_oclass *oclass, void *data, u32 size,
1113 struct nouveau_object **pobject)
1114{
1115 struct nouveau_fb *pfb = nouveau_fb(parent);
1116 struct nouveau_bios *bios = nouveau_bios(pfb);
1117 struct nouveau_gpio *gpio = nouveau_gpio(pfb);
1118 struct dcb_gpio_func func;
1119 struct nve0_ram *ram;
1120 int ret;
1121
1122 ret = nvc0_ram_create(parent, engine, oclass, &ram);
1123 *pobject = nv_object(ram);
1124 if (ret)
1125 return ret;
1126
1127 switch (ram->base.type) {
1128 case NV_MEM_TYPE_DDR3:
1129 case NV_MEM_TYPE_GDDR5:
1130 ram->base.calc = nve0_ram_calc;
1131 ram->base.prog = nve0_ram_prog;
1132 ram->base.tidy = nve0_ram_tidy;
1133 break;
1134 default:
1135 nv_warn(pfb, "reclocking of this RAM type is unsupported\n");
1136 break;
1137 }
1138
1139	/* parse bios data for both PLLs */
1140 ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
1141 if (ret) {
1142 nv_error(pfb, "mclk refpll data not found\n");
1143 return ret;
1144 }
1145
1146 ret = nvbios_pll_parse(bios, 0x04, &ram->fuc.mempll);
1147 if (ret) {
1148 nv_error(pfb, "mclk pll data not found\n");
1149 return ret;
1150 }
1151
1152 ret = gpio->find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
1153 if (ret == 0) {
1154 ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
1155 ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12;
1156 ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12;
1157 }
1158
1159 ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
1160 if (ret == 0) {
1161 ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04));
1162 ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12;
1163 ram->fuc.r_func2E[1] = (func.log[1] ^ 2) << 12;
1164 }
1165
1166 ram->fuc.r_gpiotrig = ramfuc_reg(0x00d604);
1167
1168 ram->fuc.r_0x132020 = ramfuc_reg(0x132020);
1169 ram->fuc.r_0x132028 = ramfuc_reg(0x132028);
1170 ram->fuc.r_0x132024 = ramfuc_reg(0x132024);
1171 ram->fuc.r_0x132030 = ramfuc_reg(0x132030);
1172 ram->fuc.r_0x132034 = ramfuc_reg(0x132034);
1173 ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
1174 ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
1175 ram->fuc.r_0x132040 = ramfuc_reg(0x132040);
1176
1177 ram->fuc.r_0x10f248 = ramfuc_reg(0x10f248);
1178 ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
1179 ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
1180 ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
1181 ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
1182 ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);
1183 ram->fuc.r_0x10f2a4 = ramfuc_reg(0x10f2a4);
1184 ram->fuc.r_0x10f2a8 = ramfuc_reg(0x10f2a8);
1185 ram->fuc.r_0x10f2ac = ramfuc_reg(0x10f2ac);
1186 ram->fuc.r_0x10f2cc = ramfuc_reg(0x10f2cc);
1187 ram->fuc.r_0x10f2e8 = ramfuc_reg(0x10f2e8);
1188 ram->fuc.r_0x10f250 = ramfuc_reg(0x10f250);
1189 ram->fuc.r_0x10f24c = ramfuc_reg(0x10f24c);
1190 ram->fuc.r_0x10fec4 = ramfuc_reg(0x10fec4);
1191 ram->fuc.r_0x10fec8 = ramfuc_reg(0x10fec8);
1192 ram->fuc.r_0x10f604 = ramfuc_reg(0x10f604);
1193 ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
1194 ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
1195 ram->fuc.r_0x100770 = ramfuc_reg(0x100770);
1196 ram->fuc.r_0x100778 = ramfuc_reg(0x100778);
1197 ram->fuc.r_0x10f224 = ramfuc_reg(0x10f224);
1198
1199 ram->fuc.r_0x10f870 = ramfuc_reg(0x10f870);
1200 ram->fuc.r_0x10f698 = ramfuc_reg(0x10f698);
1201 ram->fuc.r_0x10f694 = ramfuc_reg(0x10f694);
1202 ram->fuc.r_0x10f6b8 = ramfuc_reg(0x10f6b8);
1203 ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
1204 ram->fuc.r_0x10f670 = ramfuc_reg(0x10f670);
1205 ram->fuc.r_0x10f60c = ramfuc_reg(0x10f60c);
1206 ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
1207 ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
1208 ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
1209 ram->fuc.r_0x10f82c = ramfuc_reg(0x10f82c);
1210
1211 ram->fuc.r_0x10f978 = ramfuc_reg(0x10f978);
1212 ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
1213 ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
1214
1215 switch (ram->base.type) {
1216 case NV_MEM_TYPE_GDDR5:
1217 ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
1218 ram->fuc.r_mr[1] = ramfuc_reg(0x10f330);
1219 ram->fuc.r_mr[2] = ramfuc_reg(0x10f334);
1220 ram->fuc.r_mr[3] = ramfuc_reg(0x10f338);
1221 ram->fuc.r_mr[4] = ramfuc_reg(0x10f33c);
1222 ram->fuc.r_mr[5] = ramfuc_reg(0x10f340);
1223 ram->fuc.r_mr[6] = ramfuc_reg(0x10f344);
1224 ram->fuc.r_mr[7] = ramfuc_reg(0x10f348);
1225 ram->fuc.r_mr[8] = ramfuc_reg(0x10f354);
1226 ram->fuc.r_mr[15] = ramfuc_reg(0x10f34c);
1227 break;
1228 case NV_MEM_TYPE_DDR3:
1229 ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
1230 ram->fuc.r_mr[2] = ramfuc_reg(0x10f320);
1231 break;
1232 default:
1233 break;
1234 }
1235
1236 ram->fuc.r_0x62c000 = ramfuc_reg(0x62c000);
1237 ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
1238 ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
1239 ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
1240 ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
1241 ram->fuc.r_0x10f318 = ramfuc_reg(0x10f318);
1242 ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
1243 ram->fuc.r_0x10f69c = ramfuc_reg(0x10f69c);
1244 ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
1245 ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
1246 ram->fuc.r_0x1373f4 = ramfuc_reg(0x1373f4);
1247 ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
1248 ram->fuc.r_0x10f65c = ramfuc_reg(0x10f65c);
1249 ram->fuc.r_0x10f6bc = ramfuc_reg(0x10f6bc);
1250 ram->fuc.r_0x100710 = ramfuc_reg(0x100710);
1251 ram->fuc.r_0x10f750 = ramfuc_reg(0x10f750);
1252 return 0;
1253}
1254
1255struct nouveau_oclass
1256nve0_ram_oclass = {
1257 .handle = 0,
1258 .ofuncs = &(struct nouveau_ofuncs) {
1259 .ctor = nve0_ram_ctor,
1260 .dtor = _nouveau_ram_dtor,
1261 .init = nve0_ram_init,
1262 .fini = _nouveau_ram_fini,
1263 }
1264};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h
new file mode 100644
index 000000000000..571077e39071
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h
@@ -0,0 +1,18 @@
1#ifndef __NVKM_FBRAM_SEQ_H__
2#define __NVKM_FBRAM_SEQ_H__
3
4#include <subdev/bus.h>
5#include <subdev/bus/hwsq.h>
6
7#define ram_init(s,p) hwsq_init(&(s)->base, (p))
8#define ram_exec(s,e) hwsq_exec(&(s)->base, (e))
9#define ram_have(s,r) ((s)->r_##r.addr != 0x000000)
10#define ram_rd32(s,r) hwsq_rd32(&(s)->base, &(s)->r_##r)
11#define ram_wr32(s,r,d) hwsq_wr32(&(s)->base, &(s)->r_##r, (d))
12#define ram_nuke(s,r) hwsq_nuke(&(s)->base, &(s)->r_##r)
13#define ram_mask(s,r,m,d) hwsq_mask(&(s)->base, &(s)->r_##r, (m), (d))
14#define ram_setf(s,f,d) hwsq_setf(&(s)->base, (f), (d))
15#define ram_wait(s,f,d) hwsq_wait(&(s)->base, (f), (d))
16#define ram_nsec(s,n) hwsq_nsec(&(s)->base, (n))
17
18#endif
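/* [editor's note] a minimal usage sketch, not part of the patch; struct
 * and register names are hypothetical. scripts declare one hwsq_reg per
 * register they touch (the r_##r fields these macros paste together).
 */
struct my_ramseq {
	struct hwsq base;
	struct hwsq_reg r_0x100200;
};

static void
my_script(struct my_ramseq *seq)
{
	if (ram_have(seq, 0x100200)) {		/* reg initialised? */
		ram_mask(seq, 0x100200, 0x00000001, 0x00000001);
		ram_nsec(seq, 1000);		/* let it settle */
		ram_wr32(seq, 0x100200, 0x00000000);
	}
}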
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
new file mode 100644
index 000000000000..ebd4cd9c35d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
@@ -0,0 +1,99 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <subdev/bios.h>
26#include "priv.h"
27
28struct ramxlat {
29 int id;
30 u8 enc;
31};
32
33static inline int
34ramxlat(const struct ramxlat *xlat, int id)
35{
36 while (xlat->id >= 0) {
37 if (xlat->id == id)
38 return xlat->enc;
39 xlat++;
40 }
41 return -EINVAL;
42}
43
44static const struct ramxlat
45ramddr3_cl[] = {
46 { 5, 2 }, { 6, 4 }, { 7, 6 }, { 8, 8 }, { 9, 10 }, { 10, 12 },
47 { 11, 14 },
48 /* the below are mentioned in some, but not all, ddr3 docs */
49 { 12, 1 }, { 13, 3 }, { 14, 5 },
50 { -1 }
51};
52
53static const struct ramxlat
54ramddr3_wr[] = {
55 { 5, 1 }, { 6, 2 }, { 7, 3 }, { 8, 4 }, { 10, 5 }, { 12, 6 },
56 /* the below are mentioned in some, but not all, ddr3 docs */
57 { 14, 7 }, { 16, 0 },
58 { -1 }
59};
60
61static const struct ramxlat
62ramddr3_cwl[] = {
63 { 5, 0 }, { 6, 1 }, { 7, 2 }, { 8, 3 },
64 /* the below are mentioned in some, but not all, ddr3 docs */
65 { 9, 4 },
66 { -1 }
67};
68
69int
70nouveau_sddr3_calc(struct nouveau_ram *ram)
71{
72 struct nouveau_bios *bios = nouveau_bios(ram);
73 int WL, CL, WR;
74
75 switch (!!ram->timing.data * ram->timing.version) {
76 case 0x20:
77 WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7;
78 CL = nv_ro08(bios, ram->timing.data + 0x04) & 0x1f;
79 WR = nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f;
80 break;
81 default:
82 return -ENOSYS;
83 }
84
85 WL = ramxlat(ramddr3_cwl, WL);
86 CL = ramxlat(ramddr3_cl, CL);
87 WR = ramxlat(ramddr3_wr, WR);
88 if (WL < 0 || CL < 0 || WR < 0)
89 return -EINVAL;
90
91 ram->mr[0] &= ~0xe74;
92 ram->mr[0] |= (WR & 0x07) << 9;
93 ram->mr[0] |= (CL & 0x0e) << 3;
94 ram->mr[0] |= (CL & 0x01) << 2;
95
96 ram->mr[2] &= ~0x038;
97 ram->mr[2] |= (WL & 0x07) << 3;
98 return 0;
99}
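/* [editor's note] worked example of the packing above, with illustrative
 * values (not from a real timing entry): CL=7, WR=8, CWL=6 translate
 * through the tables to 6, 4 and 1 respectively, so
 *
 *   mr[0] |= (4 & 0x07) << 9   -> 0x800  (write recovery)
 *   mr[0] |= (6 & 0x0e) << 3   -> 0x030  (CAS latency, upper bits)
 *   mr[0] |= (6 & 0x01) << 2   -> 0x000  (CAS latency, low bit)
 *   mr[2] |= (1 & 0x07) << 3   -> 0x008  (CAS write latency)
 */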
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index d422acc9af15..f572c2804c32 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -67,7 +67,7 @@ nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
67 } 67 }
68 } 68 }
69 69
70 return -EINVAL; 70 return -ENOENT;
71} 71}
72 72
73static int 73static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 2895c19bb152..041fd5edaebf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -195,7 +195,7 @@ nouveau_i2c_find_type(struct nouveau_i2c *i2c, u16 type)
195 195
196static int 196static int
197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, 197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
198 struct i2c_board_info *info, 198 struct nouveau_i2c_board_info *info,
199 bool (*match)(struct nouveau_i2c_port *, 199 bool (*match)(struct nouveau_i2c_port *,
200 struct i2c_board_info *)) 200 struct i2c_board_info *))
201{ 201{
@@ -208,12 +208,29 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
208 } 208 }
209 209
210 nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index); 210 nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
211 for (i = 0; info[i].addr; i++) { 211 for (i = 0; info[i].dev.addr; i++) {
212 if (nv_probe_i2c(port, info[i].addr) && 212 u8 orig_udelay = 0;
213 (!match || match(port, &info[i]))) { 213
214 nv_info(i2c, "detected %s: %s\n", what, info[i].type); 214 if ((port->adapter.algo == &i2c_bit_algo) &&
215 (info[i].udelay != 0)) {
216 struct i2c_algo_bit_data *algo = port->adapter.algo_data;
217 nv_debug(i2c, "using custom udelay %d instead of %d\n",
218 info[i].udelay, algo->udelay);
219 orig_udelay = algo->udelay;
220 algo->udelay = info[i].udelay;
221 }
222
223 if (nv_probe_i2c(port, info[i].dev.addr) &&
224 (!match || match(port, &info[i].dev))) {
225 nv_info(i2c, "detected %s: %s\n", what,
226 info[i].dev.type);
215 return i; 227 return i;
216 } 228 }
229
230 if (orig_udelay) {
231 struct i2c_algo_bit_data *algo = port->adapter.algo_data;
232 algo->udelay = orig_udelay;
233 }
217 } 234 }
218 235
219 nv_debug(i2c, "no devices found.\n"); 236 nv_debug(i2c, "no devices found.\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index e290cfa4acee..b4b9943773bc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -25,38 +25,48 @@
25#include <subdev/mc.h> 25#include <subdev/mc.h>
26#include <core/option.h> 26#include <core/option.h>
27 27
28static inline u32
29nouveau_mc_intr_mask(struct nouveau_mc *pmc)
30{
31 u32 intr = nv_rd32(pmc, 0x000100);
32 if (intr == 0xffffffff) /* likely fallen off the bus */
33 intr = 0x00000000;
34 return intr;
35}
36
28static irqreturn_t 37static irqreturn_t
29nouveau_mc_intr(int irq, void *arg) 38nouveau_mc_intr(int irq, void *arg)
30{ 39{
31 struct nouveau_mc *pmc = arg; 40 struct nouveau_mc *pmc = arg;
32 const struct nouveau_mc_intr *map = pmc->intr_map; 41 const struct nouveau_mc_oclass *oclass = (void *)nv_object(pmc)->oclass;
33 struct nouveau_device *device = nv_device(pmc); 42 const struct nouveau_mc_intr *map = oclass->intr;
34 struct nouveau_subdev *unit; 43 struct nouveau_subdev *unit;
35 u32 stat, intr; 44 u32 intr;
36
37 intr = stat = nv_rd32(pmc, 0x000100);
38 if (intr == 0xffffffff)
39 return IRQ_NONE;
40 while (stat && map->stat) {
41 if (stat & map->stat) {
42 unit = nouveau_subdev(pmc, map->unit);
43 if (unit && unit->intr)
44 unit->intr(unit);
45 intr &= ~map->stat;
46 }
47 map++;
48 }
49 45
46 nv_wr32(pmc, 0x000140, 0x00000000);
47 nv_rd32(pmc, 0x000140);
48 intr = nouveau_mc_intr_mask(pmc);
50 if (pmc->use_msi) 49 if (pmc->use_msi)
51 nv_wr08(pmc->base.base.parent, 0x00088068, 0xff); 50 oclass->msi_rearm(pmc);
52 51
53 if (intr) { 52 if (intr) {
54 nv_error(pmc, "unknown intr 0x%08x\n", stat); 53 u32 stat = intr = nouveau_mc_intr_mask(pmc);
54 while (map->stat) {
55 if (intr & map->stat) {
56 unit = nouveau_subdev(pmc, map->unit);
57 if (unit && unit->intr)
58 unit->intr(unit);
59 stat &= ~map->stat;
60 }
61 map++;
62 }
63
64 if (stat)
65 nv_error(pmc, "unknown intr 0x%08x\n", stat);
55 } 66 }
56 67
57 if (stat == IRQ_HANDLED) 68 nv_wr32(pmc, 0x000140, 0x00000001);
58 pm_runtime_mark_last_busy(&device->pdev->dev); 69 return intr ? IRQ_HANDLED : IRQ_NONE;
59 return stat ? IRQ_HANDLED : IRQ_NONE;
60} 70}
61 71
62int 72int
@@ -91,37 +101,42 @@ _nouveau_mc_dtor(struct nouveau_object *object)
91 101
92int 102int
93nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, 103nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
94 struct nouveau_oclass *oclass, 104 struct nouveau_oclass *bclass, int length, void **pobject)
95 const struct nouveau_mc_intr *intr_map,
96 int length, void **pobject)
97{ 105{
106 const struct nouveau_mc_oclass *oclass = (void *)bclass;
98 struct nouveau_device *device = nv_device(parent); 107 struct nouveau_device *device = nv_device(parent);
99 struct nouveau_mc *pmc; 108 struct nouveau_mc *pmc;
100 int ret; 109 int ret;
101 110
102 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PMC", 111 ret = nouveau_subdev_create_(parent, engine, bclass, 0, "PMC",
103 "master", length, pobject); 112 "master", length, pobject);
104 pmc = *pobject; 113 pmc = *pobject;
105 if (ret) 114 if (ret)
106 return ret; 115 return ret;
107 116
108 pmc->intr_map = intr_map;
109
110 switch (device->pdev->device & 0x0ff0) { 117 switch (device->pdev->device & 0x0ff0) {
111 case 0x00f0: /* BR02? */ 118 case 0x00f0:
112 case 0x02e0: /* BR02? */ 119 case 0x02e0:
113 pmc->use_msi = false; 120 /* BR02? NFI how these would be handled yet exactly */
114 break; 121 break;
115 default: 122 default:
116 pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", false); 123 switch (device->chipset) {
124 case 0xaa: break; /* reported broken, nv also disable it */
125 default:
126 pmc->use_msi = true;
127 break;
128 }
129 }
130
131 pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", pmc->use_msi);
132 if (pmc->use_msi && oclass->msi_rearm) {
133 pmc->use_msi = pci_enable_msi(device->pdev) == 0;
117 if (pmc->use_msi) { 134 if (pmc->use_msi) {
118 pmc->use_msi = pci_enable_msi(device->pdev) == 0; 135 nv_info(pmc, "MSI interrupts enabled\n");
119 if (pmc->use_msi) { 136 oclass->msi_rearm(pmc);
120 nv_info(pmc, "MSI interrupts enabled\n");
121 nv_wr08(device, 0x00088068, 0xff);
122 }
123 } 137 }
124 break; 138 } else {
139 pmc->use_msi = false;
125 } 140 }
126 141
127 ret = request_irq(device->pdev->irq, nouveau_mc_intr, 142 ret = request_irq(device->pdev->irq, nouveau_mc_intr,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 64aa4edb0d9d..2d787e4dfefa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -22,17 +22,14 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/mc.h> 25#include "nv04.h"
26
27struct nv04_mc_priv {
28 struct nouveau_mc base;
29};
30 26
31const struct nouveau_mc_intr 27const struct nouveau_mc_intr
32nv04_mc_intr[] = { 28nv04_mc_intr[] = {
33 { 0x00000001, NVDEV_ENGINE_MPEG }, /* NV17- MPEG/ME */ 29 { 0x00000001, NVDEV_ENGINE_MPEG }, /* NV17- MPEG/ME */
34 { 0x00000100, NVDEV_ENGINE_FIFO }, 30 { 0x00000100, NVDEV_ENGINE_FIFO },
35 { 0x00001000, NVDEV_ENGINE_GR }, 31 { 0x00001000, NVDEV_ENGINE_GR },
32 { 0x00010000, NVDEV_ENGINE_DISP },
36 { 0x00020000, NVDEV_ENGINE_VP }, /* NV40- */ 33 { 0x00020000, NVDEV_ENGINE_VP }, /* NV40- */
37 { 0x00100000, NVDEV_SUBDEV_TIMER }, 34 { 0x00100000, NVDEV_SUBDEV_TIMER },
38 { 0x01000000, NVDEV_ENGINE_DISP }, /* NV04- PCRTC0 */ 35 { 0x01000000, NVDEV_ENGINE_DISP }, /* NV04- PCRTC0 */
@@ -42,7 +39,18 @@ nv04_mc_intr[] = {
42 {} 39 {}
43}; 40};
44 41
45static int 42int
43nv04_mc_init(struct nouveau_object *object)
44{
45 struct nv04_mc_priv *priv = (void *)object;
46
47 nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
48 nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
49
50 return nouveau_mc_init(&priv->base);
51}
52
53int
46nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 54nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
47 struct nouveau_oclass *oclass, void *data, u32 size, 55 struct nouveau_oclass *oclass, void *data, u32 size,
48 struct nouveau_object **pobject) 56 struct nouveau_object **pobject)
@@ -50,7 +58,7 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nv04_mc_priv *priv; 58 struct nv04_mc_priv *priv;
51 int ret; 59 int ret;
52 60
53 ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); 61 ret = nouveau_mc_create(parent, engine, oclass, &priv);
54 *pobject = nv_object(priv); 62 *pobject = nv_object(priv);
55 if (ret) 63 if (ret)
56 return ret; 64 return ret;
@@ -58,24 +66,14 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
58 return 0; 66 return 0;
59} 67}
60 68
61int 69struct nouveau_oclass *
62nv04_mc_init(struct nouveau_object *object) 70nv04_mc_oclass = &(struct nouveau_mc_oclass) {
63{ 71 .base.handle = NV_SUBDEV(MC, 0x04),
64 struct nv04_mc_priv *priv = (void *)object; 72 .base.ofuncs = &(struct nouveau_ofuncs) {
65
66 nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
67 nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
68
69 return nouveau_mc_init(&priv->base);
70}
71
72struct nouveau_oclass
73nv04_mc_oclass = {
74 .handle = NV_SUBDEV(MC, 0x04),
75 .ofuncs = &(struct nouveau_ofuncs) {
76 .ctor = nv04_mc_ctor, 73 .ctor = nv04_mc_ctor,
77 .dtor = _nouveau_mc_dtor, 74 .dtor = _nouveau_mc_dtor,
78 .init = nv04_mc_init, 75 .init = nv04_mc_init,
79 .fini = _nouveau_mc_fini, 76 .fini = _nouveau_mc_fini,
80 }, 77 },
81}; 78 .intr = nv04_mc_intr,
79}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
new file mode 100644
index 000000000000..b0d5c31606c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
@@ -0,0 +1,21 @@
1#ifndef __NVKM_MC_NV04_H__
2#define __NVKM_MC_NV04_H__
3
4#include <subdev/mc.h>
5
6struct nv04_mc_priv {
7 struct nouveau_mc base;
8};
9
10int nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *,
11 struct nouveau_oclass *, void *, u32,
12 struct nouveau_object **);
13
14extern const struct nouveau_mc_intr nv04_mc_intr[];
15int nv04_mc_init(struct nouveau_object *);
16void nv40_mc_msi_rearm(struct nouveau_mc *);
17int nv50_mc_init(struct nouveau_object *);
18extern const struct nouveau_mc_intr nv50_mc_intr[];
19extern const struct nouveau_mc_intr nvc0_mc_intr[];
20
21#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c
new file mode 100644
index 000000000000..5b1faecfed2d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv04.h"
26
27void
28nv40_mc_msi_rearm(struct nouveau_mc *pmc)
29{
30 struct nv04_mc_priv *priv = (void *)pmc;
31 nv_wr08(priv, 0x088068, 0xff);
32}
33
34struct nouveau_oclass *
35nv40_mc_oclass = &(struct nouveau_mc_oclass) {
36 .base.handle = NV_SUBDEV(MC, 0x40),
37 .base.ofuncs = &(struct nouveau_ofuncs) {
38 .ctor = nv04_mc_ctor,
39 .dtor = _nouveau_mc_dtor,
40 .init = nv04_mc_init,
41 .fini = _nouveau_mc_fini,
42 },
43 .intr = nv04_mc_intr,
44 .msi_rearm = nv40_mc_msi_rearm,
45}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index d9891782bf28..3bfee5c6c4f2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -22,32 +22,12 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/mc.h> 25#include "nv04.h"
26
27struct nv44_mc_priv {
28 struct nouveau_mc base;
29};
30
31static int
32nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
33 struct nouveau_oclass *oclass, void *data, u32 size,
34 struct nouveau_object **pobject)
35{
36 struct nv44_mc_priv *priv;
37 int ret;
38
39 ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
40 *pobject = nv_object(priv);
41 if (ret)
42 return ret;
43
44 return 0;
45}
46 26
47static int 27static int
48nv44_mc_init(struct nouveau_object *object) 28nv44_mc_init(struct nouveau_object *object)
49{ 29{
50 struct nv44_mc_priv *priv = (void *)object; 30 struct nv04_mc_priv *priv = (void *)object;
51 u32 tmp = nv_rd32(priv, 0x10020c); 31 u32 tmp = nv_rd32(priv, 0x10020c);
52 32
53 nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */ 33 nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
@@ -60,13 +40,15 @@ nv44_mc_init(struct nouveau_object *object)
60 return nouveau_mc_init(&priv->base); 40 return nouveau_mc_init(&priv->base);
61} 41}
62 42
63struct nouveau_oclass 43struct nouveau_oclass *
64nv44_mc_oclass = { 44nv44_mc_oclass = &(struct nouveau_mc_oclass) {
65 .handle = NV_SUBDEV(MC, 0x44), 45 .base.handle = NV_SUBDEV(MC, 0x44),
66 .ofuncs = &(struct nouveau_ofuncs) { 46 .base.ofuncs = &(struct nouveau_ofuncs) {
67 .ctor = nv44_mc_ctor, 47 .ctor = nv04_mc_ctor,
68 .dtor = _nouveau_mc_dtor, 48 .dtor = _nouveau_mc_dtor,
69 .init = nv44_mc_init, 49 .init = nv44_mc_init,
70 .fini = _nouveau_mc_fini, 50 .fini = _nouveau_mc_fini,
71 }, 51 },
72}; 52 .intr = nv04_mc_intr,
53 .msi_rearm = nv40_mc_msi_rearm,
54}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 2b1afe225db8..e8822a934c48 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -22,13 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/mc.h> 25#include "nv04.h"
26 26
27struct nv50_mc_priv { 27const struct nouveau_mc_intr
28 struct nouveau_mc base;
29};
30
31static const struct nouveau_mc_intr
32nv50_mc_intr[] = { 28nv50_mc_intr[] = {
33 { 0x00000001, NVDEV_ENGINE_MPEG }, 29 { 0x00000001, NVDEV_ENGINE_MPEG },
34 { 0x00000100, NVDEV_ENGINE_FIFO }, 30 { 0x00000100, NVDEV_ENGINE_FIFO },
@@ -45,37 +41,30 @@ nv50_mc_intr[] = {
45 {}, 41 {},
46}; 42};
47 43
48static int 44static void
49nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 45nv50_mc_msi_rearm(struct nouveau_mc *pmc)
50 struct nouveau_oclass *oclass, void *data, u32 size,
51 struct nouveau_object **pobject)
52{ 46{
53 struct nv50_mc_priv *priv; 47 struct nouveau_device *device = nv_device(pmc);
54 int ret; 48 pci_write_config_byte(device->pdev, 0x68, 0xff);
55
56 ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
57 *pobject = nv_object(priv);
58 if (ret)
59 return ret;
60
61 return 0;
62} 49}
63 50
64int 51int
65nv50_mc_init(struct nouveau_object *object) 52nv50_mc_init(struct nouveau_object *object)
66{ 53{
67 struct nv50_mc_priv *priv = (void *)object; 54 struct nv04_mc_priv *priv = (void *)object;
68 nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */ 55 nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
69 return nouveau_mc_init(&priv->base); 56 return nouveau_mc_init(&priv->base);
70} 57}
71 58
72struct nouveau_oclass 59struct nouveau_oclass *
73nv50_mc_oclass = { 60nv50_mc_oclass = &(struct nouveau_mc_oclass) {
74 .handle = NV_SUBDEV(MC, 0x50), 61 .base.handle = NV_SUBDEV(MC, 0x50),
75 .ofuncs = &(struct nouveau_ofuncs) { 62 .base.ofuncs = &(struct nouveau_ofuncs) {
76 .ctor = nv50_mc_ctor, 63 .ctor = nv04_mc_ctor,
77 .dtor = _nouveau_mc_dtor, 64 .dtor = _nouveau_mc_dtor,
78 .init = nv50_mc_init, 65 .init = nv50_mc_init,
79 .fini = _nouveau_mc_fini, 66 .fini = _nouveau_mc_fini,
80 }, 67 },
81}; 68 .intr = nv50_mc_intr,
69 .msi_rearm = nv50_mc_msi_rearm,
70}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c
new file mode 100644
index 000000000000..5f4541105e73
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv04.h"
26
27struct nouveau_oclass *
28nv94_mc_oclass = &(struct nouveau_mc_oclass) {
29 .base.handle = NV_SUBDEV(MC, 0x94),
30 .base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nv04_mc_ctor,
32 .dtor = _nouveau_mc_dtor,
33 .init = nv50_mc_init,
34 .fini = _nouveau_mc_fini,
35 },
36 .intr = nv50_mc_intr,
37 .msi_rearm = nv40_mc_msi_rearm,
38}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index 06710419a59b..f8a6f18e2d34 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -22,11 +22,7 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/mc.h> 25#include "nv04.h"
26
27struct nv98_mc_priv {
28 struct nouveau_mc base;
29};
30 26
31static const struct nouveau_mc_intr 27static const struct nouveau_mc_intr
32nv98_mc_intr[] = { 28nv98_mc_intr[] = {
@@ -36,6 +32,7 @@ nv98_mc_intr[] = {
36 { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */ 32 { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */
37 { 0x00008000, NVDEV_ENGINE_BSP }, 33 { 0x00008000, NVDEV_ENGINE_BSP },
38 { 0x00020000, NVDEV_ENGINE_VP }, 34 { 0x00020000, NVDEV_ENGINE_VP },
35 { 0x00040000, NVDEV_SUBDEV_PWR }, /* NVA3:NVC0 */
39 { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */ 36 { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */
40 { 0x00100000, NVDEV_SUBDEV_TIMER }, 37 { 0x00100000, NVDEV_SUBDEV_TIMER },
41 { 0x00200000, NVDEV_SUBDEV_GPIO }, 38 { 0x00200000, NVDEV_SUBDEV_GPIO },
@@ -47,29 +44,15 @@ nv98_mc_intr[] = {
47 {}, 44 {},
48}; 45};
49 46
50static int 47struct nouveau_oclass *
51nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 48nv98_mc_oclass = &(struct nouveau_mc_oclass) {
52 struct nouveau_oclass *oclass, void *data, u32 size, 49 .base.handle = NV_SUBDEV(MC, 0x98),
53 struct nouveau_object **pobject) 50 .base.ofuncs = &(struct nouveau_ofuncs) {
54{ 51 .ctor = nv04_mc_ctor,
55 struct nv98_mc_priv *priv;
56 int ret;
57
58 ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
59 *pobject = nv_object(priv);
60 if (ret)
61 return ret;
62
63 return 0;
64}
65
66struct nouveau_oclass
67nv98_mc_oclass = {
68 .handle = NV_SUBDEV(MC, 0x98),
69 .ofuncs = &(struct nouveau_ofuncs) {
70 .ctor = nv98_mc_ctor,
71 .dtor = _nouveau_mc_dtor, 52 .dtor = _nouveau_mc_dtor,
72 .init = nv50_mc_init, 53 .init = nv50_mc_init,
73 .fini = _nouveau_mc_fini, 54 .fini = _nouveau_mc_fini,
74 }, 55 },
75}; 56 .intr = nv98_mc_intr,
57 .msi_rearm = nv40_mc_msi_rearm,
58}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 104175c5a2dd..c02b4763a2d5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -22,13 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/mc.h> 25#include "nv04.h"
26 26
27struct nvc0_mc_priv { 27const struct nouveau_mc_intr
28 struct nouveau_mc base;
29};
30
31static const struct nouveau_mc_intr
32nvc0_mc_intr[] = { 28nvc0_mc_intr[] = {
33 { 0x00000001, NVDEV_ENGINE_PPP }, 29 { 0x00000001, NVDEV_ENGINE_PPP },
34 { 0x00000020, NVDEV_ENGINE_COPY0 }, 30 { 0x00000020, NVDEV_ENGINE_COPY0 },
@@ -41,6 +37,7 @@ nvc0_mc_intr[] = {
41 { 0x00020000, NVDEV_ENGINE_VP }, 37 { 0x00020000, NVDEV_ENGINE_VP },
42 { 0x00100000, NVDEV_SUBDEV_TIMER }, 38 { 0x00100000, NVDEV_SUBDEV_TIMER },
43 { 0x00200000, NVDEV_SUBDEV_GPIO }, 39 { 0x00200000, NVDEV_SUBDEV_GPIO },
40 { 0x01000000, NVDEV_SUBDEV_PWR },
44 { 0x02000000, NVDEV_SUBDEV_LTCG }, 41 { 0x02000000, NVDEV_SUBDEV_LTCG },
45 { 0x04000000, NVDEV_ENGINE_DISP }, 42 { 0x04000000, NVDEV_ENGINE_DISP },
46 { 0x10000000, NVDEV_SUBDEV_BUS }, 43 { 0x10000000, NVDEV_SUBDEV_BUS },
@@ -49,29 +46,22 @@ nvc0_mc_intr[] = {
49 {}, 46 {},
50}; 47};
51 48
52static int 49static void
53nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 50nvc0_mc_msi_rearm(struct nouveau_mc *pmc)
54 struct nouveau_oclass *oclass, void *data, u32 size,
55 struct nouveau_object **pobject)
56{ 51{
57 struct nvc0_mc_priv *priv; 52 struct nv04_mc_priv *priv = (void *)pmc;
58 int ret; 53 nv_wr32(priv, 0x088704, 0x00000000);
59
60 ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
61 *pobject = nv_object(priv);
62 if (ret)
63 return ret;
64
65 return 0;
66} 54}
67 55
68struct nouveau_oclass 56struct nouveau_oclass *
69nvc0_mc_oclass = { 57nvc0_mc_oclass = &(struct nouveau_mc_oclass) {
70 .handle = NV_SUBDEV(MC, 0xc0), 58 .base.handle = NV_SUBDEV(MC, 0xc0),
71 .ofuncs = &(struct nouveau_ofuncs) { 59 .base.ofuncs = &(struct nouveau_ofuncs) {
72 .ctor = nvc0_mc_ctor, 60 .ctor = nv04_mc_ctor,
73 .dtor = _nouveau_mc_dtor, 61 .dtor = _nouveau_mc_dtor,
74 .init = nv50_mc_init, 62 .init = nv50_mc_init,
75 .fini = _nouveau_mc_fini, 63 .fini = _nouveau_mc_fini,
76 }, 64 },
77}; 65 .intr = nvc0_mc_intr,
66 .msi_rearm = nvc0_mc_msi_rearm,
67}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
new file mode 100644
index 000000000000..837e545aeb9f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv04.h"
26
27struct nouveau_oclass *
28nvc3_mc_oclass = &(struct nouveau_mc_oclass) {
29 .base.handle = NV_SUBDEV(MC, 0xc3),
30 .base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nv04_mc_ctor,
32 .dtor = _nouveau_mc_dtor,
33 .init = nv50_mc_init,
34 .fini = _nouveau_mc_fini,
35 },
36 .intr = nvc0_mc_intr,
37 .msi_rearm = nv40_mc_msi_rearm,
38}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
new file mode 100644
index 000000000000..d4fd3bc9c66f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
@@ -0,0 +1,247 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/pwr.h>
26#include <subdev/timer.h>
27
28static int
29nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
30 u32 process, u32 message, u32 data0, u32 data1)
31{
32 struct nouveau_subdev *subdev = nv_subdev(ppwr);
33 u32 addr;
34
35 /* wait for a free slot in the fifo */
36 addr = nv_rd32(ppwr, 0x10a4a0);
37 if (!nv_wait_ne(ppwr, 0x10a4b0, 0xffffffff, addr ^ 8))
38 return -EBUSY;
39
40 /* we currently only support a single process at a time waiting
41	 * on a synchronous reply; take the PPWR mutex and tell the
42 * receive handler what we're waiting for
43 */
44 if (reply) {
45 mutex_lock(&subdev->mutex);
46 ppwr->recv.message = message;
47 ppwr->recv.process = process;
48 }
49
50 /* acquire data segment access */
51 do {
52 nv_wr32(ppwr, 0x10a580, 0x00000001);
53 } while (nv_rd32(ppwr, 0x10a580) != 0x00000001);
54
55 /* write the packet */
56 nv_wr32(ppwr, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
57 ppwr->send.base));
58 nv_wr32(ppwr, 0x10a1c4, process);
59 nv_wr32(ppwr, 0x10a1c4, message);
60 nv_wr32(ppwr, 0x10a1c4, data0);
61 nv_wr32(ppwr, 0x10a1c4, data1);
62 nv_wr32(ppwr, 0x10a4a0, (addr + 1) & 0x0f);
63
64 /* release data segment access */
65 nv_wr32(ppwr, 0x10a580, 0x00000000);
66
67 /* wait for reply, if requested */
68 if (reply) {
69 wait_event(ppwr->recv.wait, (ppwr->recv.process == 0));
70 reply[0] = ppwr->recv.data[0];
71 reply[1] = ppwr->recv.data[1];
72 mutex_unlock(&subdev->mutex);
73 }
74
75 return 0;
76}
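/* [editor's note] a hypothetical caller, not part of the patch: this
 * path is reached through the ppwr->message hook that
 * _nouveau_pwr_init() points at nouveau_pwr_send(). the process and
 * message ids below are made up.
 */
static int
example_pwr_query(struct nouveau_pwr *ppwr, u32 *result)
{
	u32 reply[2];
	int ret;

	/* blocks until nouveau_pwr_recv() matches the reply */
	ret = ppwr->message(ppwr, reply, 0x50524f43 /* process */,
			    0x00000001 /* message */, 0, 0);
	if (ret)
		return ret;

	*result = reply[0];
	return 0;
}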
77
78static void
79nouveau_pwr_recv(struct work_struct *work)
80{
81 struct nouveau_pwr *ppwr =
82 container_of(work, struct nouveau_pwr, recv.work);
83 u32 process, message, data0, data1;
84
85 /* nothing to do if GET == PUT */
86 u32 addr = nv_rd32(ppwr, 0x10a4cc);
87 if (addr == nv_rd32(ppwr, 0x10a4c8))
88 return;
89
90 /* acquire data segment access */
91 do {
92 nv_wr32(ppwr, 0x10a580, 0x00000002);
93 } while (nv_rd32(ppwr, 0x10a580) != 0x00000002);
94
95 /* read the packet */
96 nv_wr32(ppwr, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
97 ppwr->recv.base));
98 process = nv_rd32(ppwr, 0x10a1c4);
99 message = nv_rd32(ppwr, 0x10a1c4);
100 data0 = nv_rd32(ppwr, 0x10a1c4);
101 data1 = nv_rd32(ppwr, 0x10a1c4);
102 nv_wr32(ppwr, 0x10a4cc, (addr + 1) & 0x0f);
103
104 /* release data segment access */
105 nv_wr32(ppwr, 0x10a580, 0x00000000);
106
107 /* wake process if it's waiting on a synchronous reply */
108 if (ppwr->recv.process) {
109 if (process == ppwr->recv.process &&
110 message == ppwr->recv.message) {
111 ppwr->recv.data[0] = data0;
112 ppwr->recv.data[1] = data1;
113 ppwr->recv.process = 0;
114 wake_up(&ppwr->recv.wait);
115 return;
116 }
117 }
118
119	/* right now there are no other expected responses from the engine,
120 * so assume that any unexpected message is an error.
121 */
122 nv_warn(ppwr, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
123 (char)((process & 0x000000ff) >> 0),
124 (char)((process & 0x0000ff00) >> 8),
125 (char)((process & 0x00ff0000) >> 16),
126 (char)((process & 0xff000000) >> 24),
127 process, message, data0, data1);
128}
129
130static void
131nouveau_pwr_intr(struct nouveau_subdev *subdev)
132{
133 struct nouveau_pwr *ppwr = (void *)subdev;
134 u32 disp = nv_rd32(ppwr, 0x10a01c);
135 u32 intr = nv_rd32(ppwr, 0x10a008) & disp & ~(disp >> 16);
136
137 if (intr & 0x00000020) {
138 u32 stat = nv_rd32(ppwr, 0x10a16c);
139 if (stat & 0x80000000) {
140 nv_error(ppwr, "UAS fault at 0x%06x addr 0x%08x\n",
141 stat & 0x00ffffff, nv_rd32(ppwr, 0x10a168));
142 nv_wr32(ppwr, 0x10a16c, 0x00000000);
143 intr &= ~0x00000020;
144 }
145 }
146
147 if (intr & 0x00000040) {
148 schedule_work(&ppwr->recv.work);
149 nv_wr32(ppwr, 0x10a004, 0x00000040);
150 intr &= ~0x00000040;
151 }
152
153 if (intr & 0x00000080) {
154 nv_info(ppwr, "wr32 0x%06x 0x%08x\n", nv_rd32(ppwr, 0x10a7a0),
155 nv_rd32(ppwr, 0x10a7a4));
156 nv_wr32(ppwr, 0x10a004, 0x00000080);
157 intr &= ~0x00000080;
158 }
159
160 if (intr) {
161 nv_error(ppwr, "intr 0x%08x\n", intr);
162 nv_wr32(ppwr, 0x10a004, intr);
163 }
164}
165
166int
167_nouveau_pwr_fini(struct nouveau_object *object, bool suspend)
168{
169 struct nouveau_pwr *ppwr = (void *)object;
170
171 nv_wr32(ppwr, 0x10a014, 0x00000060);
172 flush_work(&ppwr->recv.work);
173
174 return nouveau_subdev_fini(&ppwr->base, suspend);
175}
176
177int
178_nouveau_pwr_init(struct nouveau_object *object)
179{
180 struct nouveau_pwr *ppwr = (void *)object;
181 int ret, i;
182
183 ret = nouveau_subdev_init(&ppwr->base);
184 if (ret)
185 return ret;
186
187 nv_subdev(ppwr)->intr = nouveau_pwr_intr;
188 ppwr->message = nouveau_pwr_send;
189
190 /* prevent previous ucode from running, wait for idle, reset */
191 nv_wr32(ppwr, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
192 nv_wait(ppwr, 0x10a04c, 0xffffffff, 0x00000000);
193 nv_mask(ppwr, 0x000200, 0x00002000, 0x00000000);
194 nv_mask(ppwr, 0x000200, 0x00002000, 0x00002000);
195
196 /* upload data segment */
197 nv_wr32(ppwr, 0x10a1c0, 0x01000000);
198 for (i = 0; i < ppwr->data.size / 4; i++)
199 nv_wr32(ppwr, 0x10a1c4, ppwr->data.data[i]);
200
201 /* upload code segment */
202 nv_wr32(ppwr, 0x10a180, 0x01000000);
203 for (i = 0; i < ppwr->code.size / 4; i++) {
204 if ((i & 0x3f) == 0)
205 nv_wr32(ppwr, 0x10a188, i >> 6);
206 nv_wr32(ppwr, 0x10a184, ppwr->code.data[i]);
207 }
208
209 /* start it running */
210 nv_wr32(ppwr, 0x10a10c, 0x00000000);
211 nv_wr32(ppwr, 0x10a104, 0x00000000);
212 nv_wr32(ppwr, 0x10a100, 0x00000002);
213
214 /* wait for valid host->pwr ring configuration */
215 if (!nv_wait_ne(ppwr, 0x10a4d0, 0xffffffff, 0x00000000))
216 return -EBUSY;
217 ppwr->send.base = nv_rd32(ppwr, 0x10a4d0) & 0x0000ffff;
218 ppwr->send.size = nv_rd32(ppwr, 0x10a4d0) >> 16;
219
220 /* wait for valid pwr->host ring configuration */
221 if (!nv_wait_ne(ppwr, 0x10a4dc, 0xffffffff, 0x00000000))
222 return -EBUSY;
223 ppwr->recv.base = nv_rd32(ppwr, 0x10a4dc) & 0x0000ffff;
224 ppwr->recv.size = nv_rd32(ppwr, 0x10a4dc) >> 16;
225
226 nv_wr32(ppwr, 0x10a010, 0x000000e0);
227 return 0;
228}
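/* [editor's note] layout of the ring configuration words read above, as
 * implied by the unpacking code (not from documentation):
 *
 *   0x10a4d0 (host->pwr), 0x10a4dc (pwr->host)
 *    31            16 15             0
 *   +----------------+----------------+
 *   |      size      |      base      |
 *   +----------------+----------------+
 *
 * base is apparently an offset into the PPWR data segment (it is added
 * to the slot offset before the 0x10a1c0 window access); a value of
 * 0x00000000 means the ucode hasn't published the ring yet.
 */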
229
230int
231nouveau_pwr_create_(struct nouveau_object *parent,
232 struct nouveau_object *engine,
233 struct nouveau_oclass *oclass, int length, void **pobject)
234{
235 struct nouveau_pwr *ppwr;
236 int ret;
237
238 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PPWR",
239 "pwr", length, pobject);
240 ppwr = *pobject;
241 if (ret)
242 return ret;
243
244 INIT_WORK(&ppwr->recv.work, nouveau_pwr_recv);
245 init_waitqueue_head(&ppwr->recv.wait);
246 return 0;
247}
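
The init path above recovers each ring's geometry from a single scratch register: the firmware advertises the ring's byte offset in the low 16 bits and its size in the high 16 bits, which is what the 0x10a4d0/0x10a4dc reads unpack. A minimal standalone C sketch of that packing; the helper names here are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* descriptor word layout, as unpacked above: (size << 16) | base */
static uint32_t ring_cfg_pack(uint16_t base, uint16_t size)
{
	return (uint32_t)size << 16 | base;
}

static void ring_cfg_unpack(uint32_t cfg, uint16_t *base, uint16_t *size)
{
	*base = cfg & 0x0000ffff;	/* ppwr->send.base */
	*size = cfg >> 16;		/* ppwr->send.size */
}

int main(void)
{
	uint16_t base, size;

	ring_cfg_unpack(ring_cfg_pack(0x0218, 0x0080), &base, &size);
	printf("base=0x%04x size=0x%04x\n", base, size);
	return 0;
}
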
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
new file mode 100644
index 000000000000..2284ecb1c9b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
@@ -0,0 +1,151 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifdef INCLUDE_PROC
26process(PROC_HOST, #host_init, #host_recv)
27#endif
28
29/******************************************************************************
30 * HOST data segment
31 *****************************************************************************/
32#ifdef INCLUDE_DATA
33// HOST (R)FIFO packet format
34.equ #fifo_process 0x00
35.equ #fifo_message 0x04
36.equ #fifo_data0 0x08
37.equ #fifo_data1 0x0c
38
39// HOST HOST->PWR queue description
40.equ #fifo_qlen 4 // log2(size of queue entry in bytes)
41.equ #fifo_qnum 3 // log2(max number of entries in queue)
42.equ #fifo_qmaskb (1 << #fifo_qnum) // max number of entries in queue
43.equ #fifo_qmaskp (#fifo_qmaskb - 1)
44.equ #fifo_qmaskf ((#fifo_qmaskb << 1) - 1)
45.equ #fifo_qsize (1 << (#fifo_qlen + #fifo_qnum))
46fifo_queue: .skip 128 // #fifo_qsize
47
48// HOST PWR->HOST queue description
49.equ #rfifo_qlen 4 // log2(size of queue entry in bytes)
50.equ #rfifo_qnum 3 // log2(max number of entries in queue)
51.equ #rfifo_qmaskb (1 << #rfifo_qnum) // max number of entries in queue
52.equ #rfifo_qmaskp (#rfifo_qmaskb - 1)
53.equ #rfifo_qmaskf ((#rfifo_qmaskb << 1) - 1)
54.equ #rfifo_qsize (1 << (#rfifo_qlen + #rfifo_qnum))
55rfifo_queue: .skip 128 // #rfifo_qsize
56#endif
57
58/******************************************************************************
59 * HOST code segment
60 *****************************************************************************/
61#ifdef INCLUDE_CODE
62// HOST->PWR comms - dequeue message(s) for process(es) from FIFO
63//
64// $r15 - current (host)
65// $r0 - zero
66host_send:
67 nv_iord($r1, NV_PPWR_FIFO_GET(0))
68 nv_iord($r2, NV_PPWR_FIFO_PUT(0))
69 cmp b32 $r1 $r2
70 bra e #host_send_done
71 // calculate address of message
72 and $r14 $r1 #fifo_qmaskp
73 shl b32 $r14 $r14 #fifo_qlen
74 add b32 $r14 #fifo_queue
75
76 // read message data, and pass to appropriate process
77 ld b32 $r11 D[$r14 + #fifo_data1]
78 ld b32 $r12 D[$r14 + #fifo_data0]
79 ld b32 $r13 D[$r14 + #fifo_message]
80 ld b32 $r14 D[$r14 + #fifo_process]
81 call(send)
82
83 // increment GET
84 add b32 $r1 0x1
85 and $r14 $r1 #fifo_qmaskf
86 nv_iowr(NV_PPWR_FIFO_GET(0), $r1)
87 bra #host_send
88 host_send_done:
89 ret
90
91// PWR->HOST comms - enqueue message for HOST to RFIFO
92//
93// $r15 - current (host)
94// $r14 - process
95// $r13 - message
96// $r12 - message data 0
97// $r11 - message data 1
98// $r0 - zero
99host_recv:
100 // message from intr handler == HOST->PWR comms pending
101 mov $r1 (PROC_KERN & 0x0000ffff)
102 sethi $r1 (PROC_KERN & 0xffff0000)
103 cmp b32 $r14 $r1
104 bra e #host_send
105
106 // wait for space in RFIFO
107 host_recv_wait:
108 nv_iord($r1, NV_PPWR_RFIFO_GET)
109 nv_iord($r2, NV_PPWR_RFIFO_PUT)
110 xor $r1 #rfifo_qmaskb
111 cmp b32 $r1 $r2
112 bra e #host_recv_wait
113
114 and $r3 $r2 #rfifo_qmaskp
115 shl b32 $r3 #rfifo_qlen
116 add b32 $r3 #rfifo_queue
117
118 // enqueue message
119 st b32 D[$r3 + #fifo_data1] $r11
120 st b32 D[$r3 + #fifo_data0] $r12
121 st b32 D[$r3 + #fifo_message] $r13
122 st b32 D[$r3 + #fifo_process] $r14
123
124 add b32 $r2 0x1
125 and $r2 #rfifo_qmaskf
126 nv_iowr(NV_PPWR_RFIFO_PUT, $r2)
127
128 // notify host of pending message
129 mov $r2 NV_PPWR_INTR_TRIGGER_USER0
130 nv_iowr(NV_PPWR_INTR_TRIGGER, $r2)
131 ret
132
133// $r15 - current (host)
134// $r0 - zero
135host_init:
136 // store each fifo's base/size in H2D/D2H scratch regs
137 mov $r1 #fifo_qsize
138 shl b32 $r1 16
139 or $r1 #fifo_queue
140 nv_iowr(NV_PPWR_H2D, $r1);
141
142 mov $r1 #rfifo_qsize
143 shl b32 $r1 16
144 or $r1 #rfifo_queue
145 nv_iowr(NV_PPWR_D2H, $r1);
146
147 // enable fifo subintr for first fifo
148 mov $r1 1
149 nv_iowr(NV_PPWR_FIFO_INTR_EN, $r1)
150 ret
151#endif
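
The #fifo_* constants above implement the standard power-of-two ring trick: the GET/PUT counters run modulo twice the queue depth (#fifo_qmaskf), so GET == PUT means empty while GET ^ depth == PUT means full, and the low bits (#fifo_qmaskp) select the slot. host_send and host_recv_wait test exactly these conditions. The same scheme as a small C sketch, assuming the 8-entry queue implied by #fifo_qnum = 3:

#include <stdbool.h>
#include <stdint.h>

#define QNUM   3			/* log2(entries), as #fifo_qnum */
#define QDEPTH (1u << QNUM)		/* #fifo_qmaskb */
#define QMASKP (QDEPTH - 1)		/* slot index mask, #fifo_qmaskp */
#define QMASKF ((QDEPTH << 1) - 1)	/* counter wrap mask, #fifo_qmaskf */

static bool ring_empty(uint32_t get, uint32_t put)
{
	return get == put;		/* host_send's termination test */
}

static bool ring_full(uint32_t get, uint32_t put)
{
	return (get ^ QDEPTH) == put;	/* host_recv_wait's spin condition */
}

static uint32_t ring_slot(uint32_t idx)
{
	return idx & QMASKP;		/* "and $rN #fifo_qmaskp" */
}

static uint32_t ring_next(uint32_t idx)
{
	return (idx + 1) & QMASKF;	/* "add b32 ... 1; and ... #fifo_qmaskf" */
}

Because the counters carry one extra bit beyond the slot index, a full queue and an empty queue are distinguishable without a separate count field.
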
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc
new file mode 100644
index 000000000000..98f1c3738b42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc
@@ -0,0 +1,84 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifdef INCLUDE_PROC
26process(PROC_IDLE, #idle, #idle_recv)
27#endif
28
29/******************************************************************************
30 * IDLE data segment
31 *****************************************************************************/
32#ifdef INCLUDE_DATA
33#endif
34
35/******************************************************************************
36 * IDLE code segment
37 *****************************************************************************/
38#ifdef INCLUDE_CODE
39// IDLE message handler - no messages are expected, so this is a no-op
40//
41// $r15 - current (idle)
42// $r14 - message
43// $r0 - zero
44idle_recv:
45 ret
46
47// main scheduler loop: drain every process' message queue, then sleep
48//
49// $r15 - current (idle)
50// $r0 - zero
51idle:
52 // set our "no interrupt has occurred during our execution" flag
53 bset $flags $p0
54
55 // count IDLE invocations for debugging purposes
56 nv_iord($r1, NV_PPWR_DSCRATCH(1))
57 add b32 $r1 1
58 nv_iowr(NV_PPWR_DSCRATCH(1), $r1)
59
60 // keep looping while there's pending messages for any process
61 idle_loop:
62 mov $r1 #proc_list_head
63 bclr $flags $p2
64 idle_proc:
65 // process the process' messages until there's none left
66 idle_proc_exec:
67 push $r1
68 mov b32 $r14 $r1
69 call(recv)
70 pop $r1
71 bra not $p1 #idle_proc_next
72 bset $flags $p2
73 bra #idle_proc_exec
74 // next process!
75 idle_proc_next:
76 add b32 $r1 #proc_size
77 cmp b32 $r1 $r15
78 bra ne #idle_proc
79 bra $p2 #idle_loop
80
81 // sleep if no interrupts have occurred
82 sleep $p0
83 bra #idle
84#endif
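
Stripped of registers and flags, the idle scheduler above is a cooperative loop: sweep the process table, drain each queue via recv, repeat while any sweep made progress ($p2), then sleep until the next interrupt. A rough C sketch under those assumptions; the types and externs are stand-ins for the fuc process table:

#include <stdbool.h>

/* stand-in for the entries between #proc_list_head and #proc_list_tail */
struct proc {
	bool (*recv_one)(struct proc *);  /* ~call(recv): true if a message was handled */
};

extern struct proc proc_list[];
extern int nr_procs;
extern void sleep_until_intr(void);	/* ~"sleep $p0" */

void idle_loop_sketch(void)
{
	for (;;) {
		bool progress;		/* ~$p2 */
		do {
			progress = false;
			for (int i = 0; i < nr_procs; i++)
				while (proc_list[i].recv_one(&proc_list[i]))
					progress = true;
		} while (progress);
		sleep_until_intr();	/* skipped if an intr raced us ($p0) */
	}
}
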
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
new file mode 100644
index 000000000000..0a7b05fa5c11
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
@@ -0,0 +1,452 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25/******************************************************************************
26 * kernel data segment
27 *****************************************************************************/
28#ifdef INCLUDE_PROC
29proc_kern:
30process(PROC_KERN, 0, 0)
31proc_list_head:
32#endif
33
34#ifdef INCLUDE_DATA
35proc_list_tail:
36time_prev: .b32 0
37time_next: .b32 0
38#endif
39
40/******************************************************************************
41 * kernel code segment
42 *****************************************************************************/
43#ifdef INCLUDE_CODE
44 bra #init
45
46// read nv register
47//
48// $r15 - current
49// $r14 - addr
50// $r13 - data (return)
51// $r0 - zero
52rd32:
53 nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
54 mov $r14 NV_PPWR_MMIO_CTRL_OP_RD
55 sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
56 nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
57 rd32_wait:
58 nv_iord($r14, NV_PPWR_MMIO_CTRL)
59 and $r14 NV_PPWR_MMIO_CTRL_STATUS
60 bra nz #rd32_wait
61 nv_iord($r13, NV_PPWR_MMIO_DATA)
62 ret
63
64// write nv register
65//
66// $r15 - current
67// $r14 - addr
68// $r13 - data
69// $r0 - zero
70wr32:
71 nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
72 nv_iowr(NV_PPWR_MMIO_DATA, $r13)
73 mov $r14 NV_PPWR_MMIO_CTRL_OP_WR
74 or $r14 NV_PPWR_MMIO_CTRL_MASK_B32_0
75 sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
76
77#ifdef NVKM_FALCON_MMIO_TRAP
78 mov $r8 NV_PPWR_INTR_TRIGGER_USER1
79 nv_iowr(NV_PPWR_INTR_TRIGGER, $r8)
80 wr32_host:
81 nv_iord($r8, NV_PPWR_INTR)
82 and $r8 NV_PPWR_INTR_USER1
83 bra nz #wr32_host
84#endif
85
86 nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
87 wr32_wait:
88 nv_iord($r14, NV_PPWR_MMIO_CTRL)
89 and $r14 NV_PPWR_MMIO_CTRL_STATUS
90 bra nz #wr32_wait
91 ret
92
93// busy-wait for a period of time
94//
95// $r15 - current
96// $r14 - ns
97// $r0 - zero
98nsec:
99 nv_iord($r8, NV_PPWR_TIMER_LOW)
100 nsec_loop:
101 nv_iord($r9, NV_PPWR_TIMER_LOW)
102 sub b32 $r9 $r8
103 cmp b32 $r9 $r14
104 bra l #nsec_loop
105 ret
106
107// poll an mmio register until (value & mask) == data, or the timeout expires
108//
109// $r15 - current
110// $r14 - addr
111// $r13 - mask
112// $r12 - data
113// $r11 - timeout (ns)
114// $r0 - zero
115wait:
116 nv_iord($r8, NV_PPWR_TIMER_LOW)
117 wait_loop:
118 nv_rd32($r10, $r14)
119 and $r10 $r13
120 cmp b32 $r10 $r12
121 bra e #wait_done
122 nv_iord($r9, NV_PPWR_TIMER_LOW)
123 sub b32 $r9 $r8
124 cmp b32 $r9 $r11
125 bra l #wait_loop
126 wait_done:
127 ret
128
129// $r15 - current (kern)
130// $r14 - process
131// $r8 - NV_PPWR_INTR
132intr_watchdog:
133 // read process' timer status, skip if not enabled
134 ld b32 $r9 D[$r14 + #proc_time]
135 cmp b32 $r9 0
136 bra z #intr_watchdog_next_proc
137
138 // subtract last timer's value from process' timer,
139 // if it's <= 0 then the timer has expired
140 ld b32 $r10 D[$r0 + #time_prev]
141 sub b32 $r9 $r10
142 bra g #intr_watchdog_next_time
143 mov $r13 KMSG_ALARM
144 call(send_proc)
145 clear b32 $r9
146 bra #intr_watchdog_next_proc
147
148 // otherwise, update the next timer's value if this
149 // process' timer is the soonest
150 intr_watchdog_next_time:
151 // ... or if there's no next timer yet
152 ld b32 $r10 D[$r0 + #time_next]
153 cmp b32 $r10 0
154 bra z #intr_watchdog_next_time_set
155
156 cmp b32 $r9 $r10
157 bra g #intr_watchdog_next_proc
158 intr_watchdog_next_time_set:
159 st b32 D[$r0 + #time_next] $r9
160
161 // update process' timer status, and advance
162 intr_watchdog_next_proc:
163 st b32 D[$r14 + #proc_time] $r9
164 add b32 $r14 #proc_size
165 cmp b32 $r14 #proc_list_tail
166 bra ne #intr_watchdog
167 ret
168
169intr:
170 push $r0
171 clear b32 $r0
172 push $r8
173 push $r9
174 push $r10
175 push $r11
176 push $r12
177 push $r13
178 push $r14
179 push $r15
180 mov $r15 #proc_kern
181 mov $r8 $flags
182 push $r8
183
184 nv_iord($r8, NV_PPWR_DSCRATCH(0))
185 add b32 $r8 1
186 nv_iowr(NV_PPWR_DSCRATCH(0), $r8)
187
188 nv_iord($r8, NV_PPWR_INTR)
189 and $r9 $r8 NV_PPWR_INTR_WATCHDOG
190 bra z #intr_skip_watchdog
191 st b32 D[$r0 + #time_next] $r0
192 mov $r14 #proc_list_head
193 call(intr_watchdog)
194 ld b32 $r9 D[$r0 + #time_next]
195 cmp b32 $r9 0
196 bra z #intr_skip_watchdog
197 nv_iowr(NV_PPWR_WATCHDOG_TIME, $r9)
198 st b32 D[$r0 + #time_prev] $r9
199
200 intr_skip_watchdog:
201 and $r9 $r8 NV_PPWR_INTR_SUBINTR
202 bra z #intr_skip_subintr
203 nv_iord($r9, NV_PPWR_SUBINTR)
204 and $r10 $r9 NV_PPWR_SUBINTR_FIFO
205 bra z #intr_subintr_skip_fifo
206 nv_iord($r12, NV_PPWR_FIFO_INTR)
207 push $r12
208 mov $r14 (PROC_HOST & 0x0000ffff)
209 sethi $r14 (PROC_HOST & 0xffff0000)
210 mov $r13 KMSG_FIFO
211 call(send)
212 pop $r12
213 nv_iowr(NV_PPWR_FIFO_INTR, $r12)
214 intr_subintr_skip_fifo:
215 nv_iowr(NV_PPWR_SUBINTR, $r9)
216
217 intr_skip_subintr:
218 and $r9 $r8 NV_PPWR_INTR_PAUSE
219 bra z #intr_skip_pause
220 and $r10 0xffbf
221
222 intr_skip_pause:
223 and $r9 $r8 NV_PPWR_INTR_USER0
224 bra z #intr_skip_user0
225 and $r10 0xffbf
226
227 intr_skip_user0:
228 nv_iowr(NV_PPWR_INTR_ACK, $r8)
229 pop $r8
230 mov $flags $r8
231 pop $r15
232 pop $r14
233 pop $r13
234 pop $r12
235 pop $r11
236 pop $r10
237 pop $r9
238 pop $r8
239 pop $r0
240 bclr $flags $p0
241 iret
242
243// request the current process be sent a message after a timeout expires
244//
245// $r15 - current
246// $r14 - ticks
247// $r0 - zero
248timer:
249 // interrupts off to prevent racing with timer isr
250 bclr $flags ie0
251
252 // if current process already has a timer set, bail
253 ld b32 $r8 D[$r15 + #proc_time]
254 cmp b32 $r8 0
255 bra g #timer_done
256 st b32 D[$r15 + #proc_time] $r14
257
258 // halt watchdog timer temporarily and check for a pending
259 // interrupt. if there's one already pending, we can just
260 // bail since the timer isr will queue the next soonest
261 // right after it's done
262 nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r8)
263 nv_iord($r8, NV_PPWR_INTR)
264 and $r8 NV_PPWR_INTR_WATCHDOG
265 bra nz #timer_enable
266
267 // update the watchdog if this timer should expire first,
268 // or if there's no timeout already set
269 nv_iord($r8, NV_PPWR_WATCHDOG_TIME)
270 cmp b32 $r14 $r0
271 bra e #timer_reset
272 cmp b32 $r14 $r8
273 bra l #timer_done
274 timer_reset:
275 nv_iowr(NV_PPWR_WATCHDOG_TIME, $r14)
276 st b32 D[$r0 + #time_prev] $r14
277
278 // re-enable the watchdog timer
279 timer_enable:
280 mov $r8 1
281 nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r8)
282
283 // interrupts back on
284 timer_done:
285 bset $flags ie0
286 ret
287
288// send message to another process
289//
290// $r15 - current
291// $r14 - process
292// $r13 - message
293// $r12 - message data 0
294// $r11 - message data 1
295// $r0 - zero
296send_proc:
297 push $r8
298 push $r9
299 // check for space in queue
300 ld b32 $r8 D[$r14 + #proc_qget]
301 ld b32 $r9 D[$r14 + #proc_qput]
302 xor $r8 #proc_qmaskb
303 cmp b32 $r8 $r9
304 bra e #send_done
305
306 // enqueue message
307 and $r8 $r9 #proc_qmaskp
308 shl b32 $r8 $r8 #proc_qlen
309 add b32 $r8 #proc_queue
310 add b32 $r8 $r14
311
312 ld b32 $r10 D[$r15 + #proc_id]
313 st b32 D[$r8 + #msg_process] $r10
314 st b32 D[$r8 + #msg_message] $r13
315 st b32 D[$r8 + #msg_data0] $r12
316 st b32 D[$r8 + #msg_data1] $r11
317
318 // increment PUT
319 add b32 $r9 1
320 and $r9 #proc_qmaskf
321 st b32 D[$r14 + #proc_qput] $r9
322 bset $flags $p2
323 send_done:
324 pop $r9
325 pop $r8
326 ret
327
328// lookup process structure by its name
329//
330// $r15 - current
331// $r14 - process name
332// $r0 - zero
333//
334// $r14 - process
335// $p1 - success
336find:
337 push $r8
338 mov $r8 #proc_list_head
339 bset $flags $p1
340 find_loop:
341 ld b32 $r10 D[$r8 + #proc_id]
342 cmp b32 $r10 $r14
343 bra e #find_done
344 add b32 $r8 #proc_size
345 cmp b32 $r8 #proc_list_tail
346 bra ne #find_loop
347 bclr $flags $p1
348 find_done:
349 mov b32 $r14 $r8
350 pop $r8
351 ret
352
353// send message to another process
354//
355// $r15 - current
356// $r14 - process id
357// $r13 - message
358// $r12 - message data 0
359// $r11 - message data 1
360// $r0 - zero
361send:
362 call(find)
363 bra $p1 #send_proc
364 ret
365
366// process single message for a given process
367//
368// $r15 - current
369// $r14 - process
370// $r0 - zero
371recv:
372 ld b32 $r8 D[$r14 + #proc_qget]
373 ld b32 $r9 D[$r14 + #proc_qput]
374 bclr $flags $p1
375 cmp b32 $r8 $r9
376 bra e #recv_done
377 // dequeue message
378 and $r9 $r8 #proc_qmaskp
379 add b32 $r8 1
380 and $r8 #proc_qmaskf
381 st b32 D[$r14 + #proc_qget] $r8
382 ld b32 $r10 D[$r14 + #proc_recv]
383
384 push $r15
385 mov $r15 $flags
386 push $r15
387 mov b32 $r15 $r14
388
389 shl b32 $r9 $r9 #proc_qlen
390 add b32 $r14 $r9
391 add b32 $r14 #proc_queue
392 ld b32 $r11 D[$r14 + #msg_data1]
393 ld b32 $r12 D[$r14 + #msg_data0]
394 ld b32 $r13 D[$r14 + #msg_message]
395 ld b32 $r14 D[$r14 + #msg_process]
396
397 // process it
398 call $r10
399 pop $r15
400 mov $flags $r15
401 bset $flags $p1
402 pop $r15
403 recv_done:
404 ret
405
406init:
407 // setup stack
408 nv_iord($r1, NV_PPWR_CAPS)
409 extr $r1 $r1 9:17
410 shl b32 $r1 8
411 mov $sp $r1
412
413#ifdef NVKM_FALCON_MMIO_UAS
414 // enable the UAS aperture; this is what lets the nv_rd32/nv_wr32
415 // macros access mmio directly through D[] space
416 mov $r1 0x0010
417 sethi $r1 NV_PPWR_UAS_CONFIG_ENABLE
418 nv_iowrs(NV_PPWR_UAS_CONFIG, $r1)
419#endif
420
421 // route all interrupts except user0/1 and pause to fuc
422 mov $r1 0x00e0
423 sethi $r1 0x00000000
424 nv_iowr(NV_PPWR_INTR_ROUTE, $r1)
425
426 // enable watchdog and subintr intrs
427 mov $r1 NV_PPWR_INTR_EN_CLR_MASK
428 nv_iowr(NV_PPWR_INTR_EN_CLR, $r1)
429 mov $r1 NV_PPWR_INTR_EN_SET_WATCHDOG
430 or $r1 NV_PPWR_INTR_EN_SET_SUBINTR
431 nv_iowr(NV_PPWR_INTR_EN_SET, $r1)
432
433 // enable interrupts globally
434 mov $r1 #intr
435 sethi $r1 0x00000000
436 mov $iv0 $r1
437 bset $flags ie0
438
439 // enable watchdog timer
440 mov $r1 1
441 nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r1)
442
443 // bootstrap processes, idle process will be last, and not return
444 mov $r15 #proc_list_head
445 init_proc:
446 ld b32 $r1 D[$r15 + #proc_init]
447 cmp b32 $r1 0
448 bra z #init_proc
449 call $r1
450 add b32 $r15 #proc_size
451 bra #init_proc
452#endif
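
The watchdog path above multiplexes one hardware countdown across all processes: on each timer interrupt it subtracts the previously programmed interval (#time_prev) from every armed per-process timer, sends KMSG_ALARM to those that reach zero, and reprograms the hardware with the smallest remainder (#time_next). A minimal sketch of that bookkeeping, with hypothetical names:

#include <stdint.h>

#define NPROC 5				/* HOST, MEMX, PERF, TEST, IDLE */

uint32_t proc_time[NPROC];		/* per-process countdown; 0 = disarmed */
extern void send_alarm(int proc);	/* ~send_proc with KMSG_ALARM */

/* returns the next interval to program, or 0 if no timer is armed */
uint32_t watchdog_tick(uint32_t time_prev)
{
	uint32_t next = 0;		/* ~#time_next */

	for (int i = 0; i < NPROC; i++) {
		if (!proc_time[i])
			continue;	/* timer not enabled */
		if (proc_time[i] <= time_prev) {
			send_alarm(i);	/* expired: notify and disarm */
			proc_time[i] = 0;
			continue;
		}
		proc_time[i] -= time_prev;
		if (!next || proc_time[i] < next)
			next = proc_time[i];
	}
	return next;			/* caller writes NV_PPWR_WATCHDOG_TIME */
}
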
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
new file mode 100644
index 000000000000..2a74ea907604
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
@@ -0,0 +1,199 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#define GT215 0xa3
26#define GF100 0xc0
27#define GF119 0xd9
28#define GK208 0x108
29
30#include "os.h"
31
32// IO addresses
33#define NV_PPWR_INTR_TRIGGER 0x0000
34#define NV_PPWR_INTR_TRIGGER_USER1 0x00000080
35#define NV_PPWR_INTR_TRIGGER_USER0 0x00000040
36#define NV_PPWR_INTR_ACK 0x0004
37#define NV_PPWR_INTR_ACK_SUBINTR 0x00000800
38#define NV_PPWR_INTR_ACK_WATCHDOG 0x00000002
39#define NV_PPWR_INTR 0x0008
40#define NV_PPWR_INTR_SUBINTR 0x00000800
41#define NV_PPWR_INTR_USER1 0x00000080
42#define NV_PPWR_INTR_USER0 0x00000040
43#define NV_PPWR_INTR_PAUSE 0x00000020
44#define NV_PPWR_INTR_WATCHDOG 0x00000002
45#define NV_PPWR_INTR_EN_SET 0x0010
46#define NV_PPWR_INTR_EN_SET_SUBINTR 0x00000800
47#define NV_PPWR_INTR_EN_SET_WATCHDOG 0x00000002
48#define NV_PPWR_INTR_EN_CLR 0x0014
49#define NV_PPWR_INTR_EN_CLR_MASK /* envyas can't express a full mask */ -1
50#define NV_PPWR_INTR_ROUTE 0x001c
51#define NV_PPWR_TIMER_LOW 0x002c
52#define NV_PPWR_WATCHDOG_TIME 0x0034
53#define NV_PPWR_WATCHDOG_ENABLE 0x0038
54#define NV_PPWR_CAPS 0x0108
55#define NV_PPWR_UAS_CONFIG 0x0164
56#define NV_PPWR_UAS_CONFIG_ENABLE 0x00010000
57#if NVKM_PPWR_CHIPSET >= GK208
58#define NV_PPWR_DSCRATCH(i) (4 * (i) + 0x0450)
59#endif
60#define NV_PPWR_FIFO_PUT(i) (4 * (i) + 0x04a0)
61#define NV_PPWR_FIFO_GET(i) (4 * (i) + 0x04b0)
62#define NV_PPWR_FIFO_INTR 0x04c0
63#define NV_PPWR_FIFO_INTR_EN 0x04c4
64#define NV_PPWR_RFIFO_PUT 0x04c8
65#define NV_PPWR_RFIFO_GET 0x04cc
66#define NV_PPWR_H2D 0x04d0
67#define NV_PPWR_D2H 0x04dc
68#if NVKM_PPWR_CHIPSET < GK208
69#define NV_PPWR_DSCRATCH(i) (4 * (i) + 0x05d0)
70#endif
71#define NV_PPWR_SUBINTR 0x0688
72#define NV_PPWR_SUBINTR_FIFO 0x00000002
73#define NV_PPWR_MMIO_ADDR 0x07a0
74#define NV_PPWR_MMIO_DATA 0x07a4
75#define NV_PPWR_MMIO_CTRL 0x07ac
76#define NV_PPWR_MMIO_CTRL_TRIGGER 0x00010000
77#define NV_PPWR_MMIO_CTRL_STATUS 0x00007000
78#define NV_PPWR_MMIO_CTRL_STATUS_IDLE 0x00000000
79#define NV_PPWR_MMIO_CTRL_MASK 0x000000f0
80#define NV_PPWR_MMIO_CTRL_MASK_B32_0 0x000000f0
81#define NV_PPWR_MMIO_CTRL_OP 0x00000003
82#define NV_PPWR_MMIO_CTRL_OP_RD 0x00000001
83#define NV_PPWR_MMIO_CTRL_OP_WR 0x00000002
84#define NV_PPWR_OUTPUT 0x07c0
85#define NV_PPWR_OUTPUT_FB_PAUSE 0x00000004
86#define NV_PPWR_OUTPUT_SET 0x07e0
87#define NV_PPWR_OUTPUT_SET_FB_PAUSE 0x00000004
88#define NV_PPWR_OUTPUT_CLR 0x07e4
89#define NV_PPWR_OUTPUT_CLR_FB_PAUSE 0x00000004
90
91// Inter-process message format
92.equ #msg_process 0x00 /* send() target, recv() sender */
93.equ #msg_message 0x04
94.equ #msg_data0 0x08
95.equ #msg_data1 0x0c
96
97// Kernel message IDs
98#define KMSG_FIFO 0x00000000
99#define KMSG_ALARM 0x00000001
100
101// Process message queue description
102.equ #proc_qlen 4 // log2(size of queue entry in bytes)
103.equ #proc_qnum 2 // log2(max number of entries in queue)
104.equ #proc_qmaskb (1 << #proc_qnum) // max number of entries in queue
105.equ #proc_qmaskp (#proc_qmaskb - 1)
106.equ #proc_qmaskf ((#proc_qmaskb << 1) - 1)
107.equ #proc_qsize (1 << (#proc_qlen + #proc_qnum))
108
109// Process table entry
110.equ #proc_id 0x00
111.equ #proc_init 0x04
112.equ #proc_recv 0x08
113.equ #proc_time 0x0c
114.equ #proc_qput 0x10
115.equ #proc_qget 0x14
116.equ #proc_queue 0x18
117.equ #proc_size (0x18 + #proc_qsize)
118
119#define process(id,init,recv) /*
120*/ .b32 id /*
121*/ .b32 init /*
122*/ .b32 recv /*
123*/ .b32 0 /*
124*/ .b32 0 /*
125*/ .b32 0 /*
126*/ .skip 64
127
128#ifndef NVKM_FALCON_UNSHIFTED_IO
129#define nv_iord(reg,ior) /*
130*/ mov reg ior /*
131*/ shl b32 reg 6 /*
132*/ iord reg I[reg + 0x000]
133#else
134#define nv_iord(reg,ior) /*
135*/ mov reg ior /*
136*/ iord reg I[reg + 0x000]
137#endif
138
139#ifndef NVKM_FALCON_UNSHIFTED_IO
140#define nv_iowr(ior,reg) /*
141*/ mov $r0 ior /*
142*/ shl b32 $r0 6 /*
143*/ iowr I[$r0 + 0x000] reg /*
144*/ clear b32 $r0
145#else
146#define nv_iowr(ior,reg) /*
147*/ mov $r0 ior /*
148*/ iowr I[$r0 + 0x000] reg /*
149*/ clear b32 $r0
150#endif
151
152#ifndef NVKM_FALCON_UNSHIFTED_IO
153#define nv_iowrs(ior,reg) /*
154*/ mov $r0 ior /*
155*/ shl b32 $r0 6 /*
156*/ iowrs I[$r0 + 0x000] reg /*
157*/ clear b32 $r0
158#else
159#define nv_iowrs(ior,reg) /*
160*/ mov $r0 ior /*
161*/ iowrs I[$r0 + 0x000] reg /*
162*/ clear b32 $r0
163#endif
164
165#define hash #
166#define fn(a) a
167#ifndef NVKM_FALCON_PC24
168#define call(a) call fn(hash)a
169#else
170#define call(a) lcall fn(hash)a
171#endif
172
173#ifndef NVKM_FALCON_MMIO_UAS
174#define nv_rd32(reg,addr) /*
175*/ mov b32 $r14 addr /*
176*/ call(rd32) /*
177*/ mov b32 reg $r13
178#else
179#define nv_rd32(reg,addr) /*
180*/ sethi $r0 0x14000000 /*
181*/ or $r0 addr /*
182*/ ld b32 reg D[$r0] /*
183*/ clear b32 $r0
184#endif
185
186#if !defined(NVKM_FALCON_MMIO_UAS) || defined(NVKM_FALCON_MMIO_TRAP)
187#define nv_wr32(addr,reg) /*
188*/ push addr /*
189*/ push reg /*
190*/ pop $r13 /*
191*/ pop $r14 /*
192*/ call(wr32) /*
193#else
194#define nv_wr32(addr,reg) /*
195*/ sethi $r0 0x14000000 /*
196*/ or $r0 addr /*
197*/ st b32 D[$r0] reg /*
198*/ clear b32 $r0
199#endif
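
The nv_iord/nv_iowr/nv_iowrs macro variants above differ only in whether the IO register offset is shifted left by six bits before indexing the falcon's I[] space; NVKM_FALCON_UNSHIFTED_IO (set for GK208 in nv108.fuc below) drops the shift. The same conditional expressed in C, purely as an illustration:

#include <stdint.h>

/* effective I[] index for an IO register, mirroring nv_iord/nv_iowr */
static inline uint32_t falcon_io_index(uint32_t ior)
{
#ifndef NVKM_FALCON_UNSHIFTED_IO
	return ior << 6;	/* older falcons: "shl b32 reg 6" */
#else
	return ior;		/* GK208+: offset used as-is */
#endif
}
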
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
new file mode 100644
index 000000000000..d43741eccb11
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
@@ -0,0 +1,219 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifdef INCLUDE_PROC
26process(PROC_MEMX, #memx_init, #memx_recv)
27#endif
28
29/******************************************************************************
30 * MEMX data segment
31 *****************************************************************************/
32#ifdef INCLUDE_DATA
33.equ #memx_opcode 0
34.equ #memx_header 2
35.equ #memx_length 4
36.equ #memx_func 8
37
38#define handler(cmd,hdr,len,func) /*
39*/ .b16 MEMX_##cmd /*
40*/ .b16 hdr /*
41*/ .b16 len /*
42*/ .b16 0 /*
43*/ .b32 func
44
45memx_func_head:
46handler(ENTER , 0x0001, 0x0000, #memx_func_enter)
47memx_func_next:
48handler(LEAVE , 0x0000, 0x0000, #memx_func_leave)
49handler(WR32 , 0x0000, 0x0002, #memx_func_wr32)
50handler(WAIT , 0x0004, 0x0000, #memx_func_wait)
51handler(DELAY , 0x0001, 0x0000, #memx_func_delay)
52memx_func_tail:
53
54.equ #memx_func_size #memx_func_next - #memx_func_head
55.equ #memx_func_num (#memx_func_tail - #memx_func_head) / #memx_func_size
56
57memx_data_head:
58.skip 0x0800
59memx_data_tail:
60#endif
61
62/******************************************************************************
63 * MEMX code segment
64 *****************************************************************************/
65#ifdef INCLUDE_CODE
66// ENTER: pause framebuffer access before the script touches memory
67//
68// $r15 - current (memx)
69// $r4 - packet length
70// +00: bitmask of heads to wait for vblank on
71// $r3 - opcode description
72// $r0 - zero
73memx_func_enter:
74 mov $r6 NV_PPWR_OUTPUT_SET_FB_PAUSE
75 nv_iowr(NV_PPWR_OUTPUT_SET, $r6)
76 memx_func_enter_wait:
77 nv_iord($r6, NV_PPWR_OUTPUT)
78 and $r6 NV_PPWR_OUTPUT_FB_PAUSE
79 bra z #memx_func_enter_wait
80 //XXX: TODO
81 ld b32 $r6 D[$r1 + 0x00]
82 add b32 $r1 0x04
83 ret
84
85// LEAVE: resume framebuffer access once the script is finished
86//
87// $r15 - current (memx)
88// $r4 - packet length
89// $r3 - opcode description
90// $r0 - zero
91memx_func_leave:
92 mov $r6 NV_PPWR_OUTPUT_CLR_FB_PAUSE
93 nv_iowr(NV_PPWR_OUTPUT_CLR, $r6)
94 memx_func_leave_wait:
95 nv_iord($r6, NV_PPWR_OUTPUT)
96 and $r6 NV_PPWR_OUTPUT_FB_PAUSE
97 bra nz #memx_func_leave_wait
98 ret
99
100// WR32: write each (addr, data) pair in the packet to mmio
101//
102// $r15 - current (memx)
103// $r4 - packet length
104// +00*n: addr
105// +04*n: data
106// $r3 - opcode description
107// $r0 - zero
108memx_func_wr32:
109 ld b32 $r6 D[$r1 + 0x00]
110 ld b32 $r5 D[$r1 + 0x04]
111 add b32 $r1 0x08
112 nv_wr32($r6, $r5)
113 sub b32 $r4 0x02
114 bra nz #memx_func_wr32
115 ret
116
117// WAIT: poll addr until (value & mask) == data, or the timeout expires
118//
119// $r15 - current (memx)
120// $r4 - packet length
121// +00: addr
122// +04: mask
123// +08: data
124// +0c: timeout (ns)
125// $r3 - opcode description
126// $r0 - zero
127memx_func_wait:
128 nv_iord($r8, NV_PPWR_TIMER_LOW)
129 ld b32 $r14 D[$r1 + 0x00]
130 ld b32 $r13 D[$r1 + 0x04]
131 ld b32 $r12 D[$r1 + 0x08]
132 ld b32 $r11 D[$r1 + 0x0c]
133 add b32 $r1 0x10
134 call(wait)
135 ret
136
137// DELAY: busy-wait for the requested number of nanoseconds
138//
139// $r15 - current (memx)
140// $r4 - packet length
141// +00: time (ns)
142// $r3 - opcode description
143// $r0 - zero
144memx_func_delay:
145 ld b32 $r14 D[$r1 + 0x00]
146 add b32 $r1 0x04
147 call(nsec)
148 ret
149
150// execute a script: decode each packet header and call its handler
151//
152// $r15 - current (memx)
153// $r14 - sender process name
154// $r13 - message (exec)
155// $r12 - head of script
156// $r11 - tail of script
157// $r0 - zero
158memx_exec:
159 push $r14
160 push $r13
161 mov b32 $r1 $r12
162 mov b32 $r2 $r11
163 memx_exec_next:
164 // fetch the packet header, and locate opcode info
165 ld b32 $r3 D[$r1]
166 add b32 $r1 4
167 shr b32 $r4 $r3 16
168 mulu $r3 #memx_func_size
169
170 // execute the opcode handler
171 ld b32 $r5 D[$r3 + #memx_func_head + #memx_func]
172 call $r5
173
174 // keep going, if we haven't reached the end
175 cmp b32 $r1 $r2
176 bra l #memx_exec_next
177
178 // send completion reply
179 pop $r13
180 pop $r14
181 call(send)
182 ret
183
184// INFO: reply with the offset and size of the script data buffer
185//
186// $r15 - current (memx)
187// $r14 - sender process name
188// $r13 - message
189// $r12 - data0
190// $r11 - data1
191// $r0 - zero
192memx_info:
193 mov $r12 #memx_data_head
194 mov $r11 #memx_data_tail - #memx_data_head
195 call(send)
196 ret
197
198// MEMX message handler: dispatch EXEC and INFO requests
199//
200// $r15 - current (memx)
201// $r14 - sender process name
202// $r13 - message
203// $r12 - data0
204// $r11 - data1
205// $r0 - zero
206memx_recv:
207 cmp b32 $r13 MEMX_MSG_EXEC
208 bra e #memx_exec
209 cmp b32 $r13 MEMX_MSG_INFO
210 bra e #memx_info
211 ret
212
213// MEMX init: nothing to set up yet
214//
215// $r15 - current (memx)
216// $r0 - zero
217memx_init:
218 ret
219#endif
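
Scripts executed by memx_exec are flat arrays of 32-bit words: each packet is one header word, opcode in the low half and payload word count in the high half (the "shr b32 $r4 $r3 16" split above), followed by the payload. A hedged C sketch of emitting such a script; the MEMX_* enum values are assumed to match the handler table order, the register address is made up, and ENTER's single payload word follows the "+00: bitmask of heads" note above:

#include <stddef.h>
#include <stdint.h>

/* assumed to match the handler table order above */
enum { MEMX_ENTER, MEMX_LEAVE, MEMX_WR32, MEMX_WAIT, MEMX_DELAY };

static size_t memx_emit(uint32_t *s, size_t i, uint16_t op,
			const uint32_t *data, uint16_t nr)
{
	s[i++] = (uint32_t)nr << 16 | op;	/* header: payload words | opcode */
	while (nr--)
		s[i++] = *data++;
	return i;
}

/* example script: pause FB, write one register, delay, unpause */
size_t build_script(uint32_t *s)
{
	const uint32_t heads[] = { 0 };			/* vblank head bitmask */
	const uint32_t wr[] = { 0x100210, 0x00000001 };	/* addr, data (made up) */
	const uint32_t ns[] = { 1000 };			/* delay in ns */
	size_t i = 0;

	i = memx_emit(s, i, MEMX_ENTER, heads, 1);
	i = memx_emit(s, i, MEMX_WR32,  wr,    2);
	i = memx_emit(s, i, MEMX_DELAY, ns,    1);
	i = memx_emit(s, i, MEMX_LEAVE, NULL,  0);
	return i;	/* number of words, bounded by memx_data_head..tail */
}
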
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
new file mode 100644
index 000000000000..947be536daef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#define NVKM_PPWR_CHIPSET GK208
26
27#define NVKM_FALCON_PC24
28#define NVKM_FALCON_UNSHIFTED_IO
29//#define NVKM_FALCON_MMIO_UAS
30//#define NVKM_FALCON_MMIO_TRAP
31
32#include "macros.fuc"
33
34.section #nv108_pwr_data
35#define INCLUDE_PROC
36#include "kernel.fuc"
37#include "host.fuc"
38#include "memx.fuc"
39#include "perf.fuc"
40#include "test.fuc"
41#include "idle.fuc"
42#undef INCLUDE_PROC
43
44#define INCLUDE_DATA
45#include "kernel.fuc"
46#include "host.fuc"
47#include "memx.fuc"
48#include "perf.fuc"
49#include "test.fuc"
50#include "idle.fuc"
51#undef INCLUDE_DATA
52.align 256
53
54.section #nv108_pwr_code
55#define INCLUDE_CODE
56#include "kernel.fuc"
57#include "host.fuc"
58#include "memx.fuc"
59#include "perf.fuc"
60#include "test.fuc"
61#include "idle.fuc"
62#undef INCLUDE_CODE
63.align 256
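
Each build file like this one includes every module three times, with a different INCLUDE_* switch active per pass, so one .fuc source contributes its process-table entry, its data, and its code to the right output section. The same single-source, multi-pass pattern in C, as an illustration only (file and symbol names hypothetical):

/* module.h - hypothetical single-source module, sliced by its includer */
#ifdef INCLUDE_PROC
int module_init(void);			/* pass 1: table/declarations */
#endif

#ifdef INCLUDE_DATA
static int module_counter;		/* pass 2: data */
#endif

#ifdef INCLUDE_CODE
int module_init(void)			/* pass 3: code */
{
	return module_counter = 0;
}
#endif

/* build.c - one pass per section, as in the .fuc build files */
#define INCLUDE_PROC
#include "module.h"
#undef INCLUDE_PROC

#define INCLUDE_DATA
#include "module.h"
#undef INCLUDE_DATA

#define INCLUDE_CODE
#include "module.h"
#undef INCLUDE_CODE
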
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
new file mode 100644
index 000000000000..9342e2d7d3b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -0,0 +1,1165 @@
1uint32_t nv108_pwr_data[] = {
2/* 0x0000: proc_kern */
3 0x52544e49,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25/* 0x0058: proc_list_head */
26 0x54534f48,
27 0x00000379,
28 0x0000032a,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x584d454d,
49 0x0000046f,
50 0x00000461,
51 0x00000000,
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x00000000,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00000000,
67 0x00000000,
68 0x00000000,
69 0x00000000,
70 0x46524550,
71 0x00000473,
72 0x00000471,
73 0x00000000,
74 0x00000000,
75 0x00000000,
76 0x00000000,
77 0x00000000,
78 0x00000000,
79 0x00000000,
80 0x00000000,
81 0x00000000,
82 0x00000000,
83 0x00000000,
84 0x00000000,
85 0x00000000,
86 0x00000000,
87 0x00000000,
88 0x00000000,
89 0x00000000,
90 0x00000000,
91 0x00000000,
92 0x54534554,
93 0x00000494,
94 0x00000475,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x454c4449,
115 0x0000049f,
116 0x0000049d,
117 0x00000000,
118 0x00000000,
119 0x00000000,
120 0x00000000,
121 0x00000000,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136/* 0x0210: proc_list_tail */
137/* 0x0210: time_prev */
138 0x00000000,
139/* 0x0214: time_next */
140 0x00000000,
141/* 0x0218: fifo_queue */
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174/* 0x0298: rfifo_queue */
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195 0x00000000,
196 0x00000000,
197 0x00000000,
198 0x00000000,
199 0x00000000,
200 0x00000000,
201 0x00000000,
202 0x00000000,
203 0x00000000,
204 0x00000000,
205 0x00000000,
206 0x00000000,
207/* 0x0318: memx_func_head */
208 0x00010000,
209 0x00000000,
210 0x000003a9,
211/* 0x0324: memx_func_next */
212 0x00000001,
213 0x00000000,
214 0x000003c7,
215 0x00000002,
216 0x00000002,
217 0x000003df,
218 0x00040003,
219 0x00000000,
220 0x00000407,
221 0x00010004,
222 0x00000000,
223 0x00000421,
224/* 0x0354: memx_func_tail */
225/* 0x0354: memx_data_head */
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000,
237 0x00000000,
238 0x00000000,
239 0x00000000,
240 0x00000000,
241 0x00000000,
242 0x00000000,
243 0x00000000,
244 0x00000000,
245 0x00000000,
246 0x00000000,
247 0x00000000,
248 0x00000000,
249 0x00000000,
250 0x00000000,
251 0x00000000,
252 0x00000000,
253 0x00000000,
254 0x00000000,
255 0x00000000,
256 0x00000000,
257 0x00000000,
258 0x00000000,
259 0x00000000,
260 0x00000000,
261 0x00000000,
262 0x00000000,
263 0x00000000,
264 0x00000000,
265 0x00000000,
266 0x00000000,
267 0x00000000,
268 0x00000000,
269 0x00000000,
270 0x00000000,
271 0x00000000,
272 0x00000000,
273 0x00000000,
274 0x00000000,
275 0x00000000,
276 0x00000000,
277 0x00000000,
278 0x00000000,
279 0x00000000,
280 0x00000000,
281 0x00000000,
282 0x00000000,
283 0x00000000,
284 0x00000000,
285 0x00000000,
286 0x00000000,
287 0x00000000,
288 0x00000000,
289 0x00000000,
290 0x00000000,
291 0x00000000,
292 0x00000000,
293 0x00000000,
294 0x00000000,
295 0x00000000,
296 0x00000000,
297 0x00000000,
298 0x00000000,
299 0x00000000,
300 0x00000000,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0x00000000,
305 0x00000000,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0x00000000,
311 0x00000000,
312 0x00000000,
313 0x00000000,
314 0x00000000,
315 0x00000000,
316 0x00000000,
317 0x00000000,
318 0x00000000,
319 0x00000000,
320 0x00000000,
321 0x00000000,
322 0x00000000,
323 0x00000000,
324 0x00000000,
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000,
332 0x00000000,
333 0x00000000,
334 0x00000000,
335 0x00000000,
336 0x00000000,
337 0x00000000,
338 0x00000000,
339 0x00000000,
340 0x00000000,
341 0x00000000,
342 0x00000000,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 0x00000000,
347 0x00000000,
348 0x00000000,
349 0x00000000,
350 0x00000000,
351 0x00000000,
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000,
360 0x00000000,
361 0x00000000,
362 0x00000000,
363 0x00000000,
364 0x00000000,
365 0x00000000,
366 0x00000000,
367 0x00000000,
368 0x00000000,
369 0x00000000,
370 0x00000000,
371 0x00000000,
372 0x00000000,
373 0x00000000,
374 0x00000000,
375 0x00000000,
376 0x00000000,
377 0x00000000,
378 0x00000000,
379 0x00000000,
380 0x00000000,
381 0x00000000,
382 0x00000000,
383 0x00000000,
384 0x00000000,
385 0x00000000,
386 0x00000000,
387 0x00000000,
388 0x00000000,
389 0x00000000,
390 0x00000000,
391 0x00000000,
392 0x00000000,
393 0x00000000,
394 0x00000000,
395 0x00000000,
396 0x00000000,
397 0x00000000,
398 0x00000000,
399 0x00000000,
400 0x00000000,
401 0x00000000,
402 0x00000000,
403 0x00000000,
404 0x00000000,
405 0x00000000,
406 0x00000000,
407 0x00000000,
408 0x00000000,
409 0x00000000,
410 0x00000000,
411 0x00000000,
412 0x00000000,
413 0x00000000,
414 0x00000000,
415 0x00000000,
416 0x00000000,
417 0x00000000,
418 0x00000000,
419 0x00000000,
420 0x00000000,
421 0x00000000,
422 0x00000000,
423 0x00000000,
424 0x00000000,
425 0x00000000,
426 0x00000000,
427 0x00000000,
428 0x00000000,
429 0x00000000,
430 0x00000000,
431 0x00000000,
432 0x00000000,
433 0x00000000,
434 0x00000000,
435 0x00000000,
436 0x00000000,
437 0x00000000,
438 0x00000000,
439 0x00000000,
440 0x00000000,
441 0x00000000,
442 0x00000000,
443 0x00000000,
444 0x00000000,
445 0x00000000,
446 0x00000000,
447 0x00000000,
448 0x00000000,
449 0x00000000,
450 0x00000000,
451 0x00000000,
452 0x00000000,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x00000000,
466 0x00000000,
467 0x00000000,
468 0x00000000,
469 0x00000000,
470 0x00000000,
471 0x00000000,
472 0x00000000,
473 0x00000000,
474 0x00000000,
475 0x00000000,
476 0x00000000,
477 0x00000000,
478 0x00000000,
479 0x00000000,
480 0x00000000,
481 0x00000000,
482 0x00000000,
483 0x00000000,
484 0x00000000,
485 0x00000000,
486 0x00000000,
487 0x00000000,
488 0x00000000,
489 0x00000000,
490 0x00000000,
491 0x00000000,
492 0x00000000,
493 0x00000000,
494 0x00000000,
495 0x00000000,
496 0x00000000,
497 0x00000000,
498 0x00000000,
499 0x00000000,
500 0x00000000,
501 0x00000000,
502 0x00000000,
503 0x00000000,
504 0x00000000,
505 0x00000000,
506 0x00000000,
507 0x00000000,
508 0x00000000,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000,
514 0x00000000,
515 0x00000000,
516 0x00000000,
517 0x00000000,
518 0x00000000,
519 0x00000000,
520 0x00000000,
521 0x00000000,
522 0x00000000,
523 0x00000000,
524 0x00000000,
525 0x00000000,
526 0x00000000,
527 0x00000000,
528 0x00000000,
529 0x00000000,
530 0x00000000,
531 0x00000000,
532 0x00000000,
533 0x00000000,
534 0x00000000,
535 0x00000000,
536 0x00000000,
537 0x00000000,
538 0x00000000,
539 0x00000000,
540 0x00000000,
541 0x00000000,
542 0x00000000,
543 0x00000000,
544 0x00000000,
545 0x00000000,
546 0x00000000,
547 0x00000000,
548 0x00000000,
549 0x00000000,
550 0x00000000,
551 0x00000000,
552 0x00000000,
553 0x00000000,
554 0x00000000,
555 0x00000000,
556 0x00000000,
557 0x00000000,
558 0x00000000,
559 0x00000000,
560 0x00000000,
561 0x00000000,
562 0x00000000,
563 0x00000000,
564 0x00000000,
565 0x00000000,
566 0x00000000,
567 0x00000000,
568 0x00000000,
569 0x00000000,
570 0x00000000,
571 0x00000000,
572 0x00000000,
573 0x00000000,
574 0x00000000,
575 0x00000000,
576 0x00000000,
577 0x00000000,
578 0x00000000,
579 0x00000000,
580 0x00000000,
581 0x00000000,
582 0x00000000,
583 0x00000000,
584 0x00000000,
585 0x00000000,
586 0x00000000,
587 0x00000000,
588 0x00000000,
589 0x00000000,
590 0x00000000,
591 0x00000000,
592 0x00000000,
593 0x00000000,
594 0x00000000,
595 0x00000000,
596 0x00000000,
597 0x00000000,
598 0x00000000,
599 0x00000000,
600 0x00000000,
601 0x00000000,
602 0x00000000,
603 0x00000000,
604 0x00000000,
605 0x00000000,
606 0x00000000,
607 0x00000000,
608 0x00000000,
609 0x00000000,
610 0x00000000,
611 0x00000000,
612 0x00000000,
613 0x00000000,
614 0x00000000,
615 0x00000000,
616 0x00000000,
617 0x00000000,
618 0x00000000,
619 0x00000000,
620 0x00000000,
621 0x00000000,
622 0x00000000,
623 0x00000000,
624 0x00000000,
625 0x00000000,
626 0x00000000,
627 0x00000000,
628 0x00000000,
629 0x00000000,
630 0x00000000,
631 0x00000000,
632 0x00000000,
633 0x00000000,
634 0x00000000,
635 0x00000000,
636 0x00000000,
637 0x00000000,
638 0x00000000,
639 0x00000000,
640 0x00000000,
641 0x00000000,
642 0x00000000,
643 0x00000000,
644 0x00000000,
645 0x00000000,
646 0x00000000,
647 0x00000000,
648 0x00000000,
649 0x00000000,
650 0x00000000,
651 0x00000000,
652 0x00000000,
653 0x00000000,
654 0x00000000,
655 0x00000000,
656 0x00000000,
657 0x00000000,
658 0x00000000,
659 0x00000000,
660 0x00000000,
661 0x00000000,
662 0x00000000,
663 0x00000000,
664 0x00000000,
665 0x00000000,
666 0x00000000,
667 0x00000000,
668 0x00000000,
669 0x00000000,
670 0x00000000,
671 0x00000000,
672 0x00000000,
673 0x00000000,
674 0x00000000,
675 0x00000000,
676 0x00000000,
677 0x00000000,
678 0x00000000,
679 0x00000000,
680 0x00000000,
681 0x00000000,
682 0x00000000,
683 0x00000000,
684 0x00000000,
685 0x00000000,
686 0x00000000,
687 0x00000000,
688 0x00000000,
689 0x00000000,
690 0x00000000,
691 0x00000000,
692 0x00000000,
693 0x00000000,
694 0x00000000,
695 0x00000000,
696 0x00000000,
697 0x00000000,
698 0x00000000,
699 0x00000000,
700 0x00000000,
701 0x00000000,
702 0x00000000,
703 0x00000000,
704 0x00000000,
705 0x00000000,
706 0x00000000,
707 0x00000000,
708 0x00000000,
709 0x00000000,
710 0x00000000,
711 0x00000000,
712 0x00000000,
713 0x00000000,
714 0x00000000,
715 0x00000000,
716 0x00000000,
717 0x00000000,
718 0x00000000,
719 0x00000000,
720 0x00000000,
721 0x00000000,
722 0x00000000,
723 0x00000000,
724 0x00000000,
725 0x00000000,
726 0x00000000,
727 0x00000000,
728 0x00000000,
729 0x00000000,
730 0x00000000,
731 0x00000000,
732 0x00000000,
733 0x00000000,
734 0x00000000,
735 0x00000000,
736 0x00000000,
737 0x00000000,
738/* 0x0b54: memx_data_tail */
739 0x00000000,
740 0x00000000,
741 0x00000000,
742 0x00000000,
743 0x00000000,
744 0x00000000,
745 0x00000000,
746 0x00000000,
747 0x00000000,
748 0x00000000,
749 0x00000000,
750 0x00000000,
751 0x00000000,
752 0x00000000,
753 0x00000000,
754 0x00000000,
755 0x00000000,
756 0x00000000,
757 0x00000000,
758 0x00000000,
759 0x00000000,
760 0x00000000,
761 0x00000000,
762 0x00000000,
763 0x00000000,
764 0x00000000,
765 0x00000000,
766 0x00000000,
767 0x00000000,
768 0x00000000,
769 0x00000000,
770 0x00000000,
771 0x00000000,
772 0x00000000,
773 0x00000000,
774 0x00000000,
775 0x00000000,
776 0x00000000,
777 0x00000000,
778 0x00000000,
779 0x00000000,
780 0x00000000,
781 0x00000000,
782};
783
784uint32_t nv108_pwr_code[] = {
785 0x02910ef5,
786/* 0x0004: rd32 */
787 0xf607a040,
788 0x04bd000e,
789 0xe3f0010e,
790 0x07ac4001,
791 0xbd000ef6,
792/* 0x0019: rd32_wait */
793 0x07ac4e04,
794 0xf100eecf,
795 0xf47000e4,
796 0xa44df61b,
797 0x00ddcf07,
798/* 0x002e: wr32 */
799 0xa04000f8,
800 0x000ef607,
801 0xa44004bd,
802 0x000df607,
803 0x020e04bd,
804 0xf0f0e5f0,
805 0xac4001e3,
806 0x000ef607,
807/* 0x004e: wr32_wait */
808 0xac4e04bd,
809 0x00eecf07,
810 0x7000e4f1,
811 0xf8f61bf4,
812/* 0x005d: nsec */
813 0xcf2c0800,
814/* 0x0062: nsec_loop */
815 0x2c090088,
816 0xbb0099cf,
817 0x9ea60298,
818 0xf8f61ef4,
819/* 0x0071: wait */
820 0xcf2c0800,
821/* 0x0076: wait_loop */
822 0xeeb20088,
823 0x0000047e,
824 0xadfddab2,
825 0xf4aca604,
826 0x2c09100b,
827 0xbb0099cf,
828 0x9ba60298,
829/* 0x0093: wait_done */
830 0xf8e61ef4,
831/* 0x0095: intr_watchdog */
832 0x03e99800,
833 0xf40096b0,
834 0x0a98280b,
835 0x029abb84,
836 0x0d0e1cf4,
837 0x01de7e01,
838 0xf494bd00,
839/* 0x00b2: intr_watchdog_next_time */
840 0x0a98140e,
841 0x00a6b085,
842 0xa6080bf4,
843 0x061cf49a,
844/* 0x00c0: intr_watchdog_next_time_set */
845/* 0x00c3: intr_watchdog_next_proc */
846 0xb58509b5,
847 0xe0b603e9,
848 0x10e6b158,
849 0xc81bf402,
850/* 0x00d2: intr */
851 0x00f900f8,
852 0x80f904bd,
853 0xa0f990f9,
854 0xc0f9b0f9,
855 0xe0f9d0f9,
856 0x000ff0f9,
857 0xf90188fe,
858 0x04504880,
859 0xb60088cf,
860 0x50400180,
861 0x0008f604,
862 0x080804bd,
863 0xc40088cf,
864 0x0bf40289,
865 0x8500b51f,
866 0x957e580e,
867 0x09980000,
868 0x0096b085,
869 0x000d0bf4,
870 0x0009f634,
871 0x09b504bd,
872/* 0x0125: intr_skip_watchdog */
873 0x0089e484,
874 0x360bf408,
875 0xcf068849,
876 0x9ac40099,
877 0x220bf402,
878 0xcf04c04c,
879 0xc0f900cc,
880 0xf14f484e,
881 0x0d5453e3,
882 0x023f7e00,
883 0x40c0fc00,
884 0x0cf604c0,
885/* 0x0157: intr_subintr_skip_fifo */
886 0x4004bd00,
887 0x09f60688,
888/* 0x015f: intr_skip_subintr */
889 0xc404bd00,
890 0x0bf42089,
891 0xbfa4f107,
892/* 0x0169: intr_skip_pause */
893 0x4089c4ff,
894 0xf1070bf4,
895/* 0x0173: intr_skip_user0 */
896 0x00ffbfa4,
897 0x0008f604,
898 0x80fc04bd,
899 0xfc0088fe,
900 0xfce0fcf0,
901 0xfcc0fcd0,
902 0xfca0fcb0,
903 0xfc80fc90,
904 0x0032f400,
905/* 0x0196: timer */
906 0x32f401f8,
907 0x03f89810,
908 0xf40086b0,
909 0xfeb53a1c,
910 0xf6380003,
911 0x04bd0008,
912 0x88cf0808,
913 0x0284f000,
914 0x081c1bf4,
915 0x0088cf34,
916 0x0bf4e0a6,
917 0xf4e8a608,
918/* 0x01c6: timer_reset */
919 0x3400161e,
920 0xbd000ef6,
921 0x840eb504,
922/* 0x01d0: timer_enable */
923 0x38000108,
924 0xbd0008f6,
925/* 0x01d9: timer_done */
926 0x1031f404,
927/* 0x01de: send_proc */
928 0x80f900f8,
929 0xe89890f9,
930 0x04e99805,
931 0xa60486f0,
932 0x2a0bf489,
933 0x940398c4,
934 0x80b60488,
935 0x008ebb18,
936 0xb500fa98,
937 0x8db5008a,
938 0x028cb501,
939 0xb6038bb5,
940 0x94f00190,
941 0x04e9b507,
942/* 0x0217: send_done */
943 0xfc0231f4,
944 0xf880fc90,
945/* 0x021d: find */
946 0x0880f900,
947 0x0131f458,
948/* 0x0224: find_loop */
949 0xa6008a98,
950 0x100bf4ae,
951 0xb15880b6,
952 0xf4021086,
953 0x32f4f11b,
954/* 0x0239: find_done */
955 0xfc8eb201,
956/* 0x023f: send */
957 0x7e00f880,
958 0xf400021d,
959 0x00f89b01,
960/* 0x0248: recv */
961 0x9805e898,
962 0x32f404e9,
963 0xf489a601,
964 0x89c43c0b,
965 0x0180b603,
966 0xb50784f0,
967 0xea9805e8,
968 0xfef0f902,
969 0xf0f9018f,
970 0x9994efb2,
971 0x00e9bb04,
972 0x9818e0b6,
973 0xec9803eb,
974 0x01ed9802,
975 0xf900ee98,
976 0xfef0fca5,
977 0x31f400f8,
978/* 0x028f: recv_done */
979 0xf8f0fc01,
980/* 0x0291: init */
981 0x01084100,
982 0xe70011cf,
983 0xb6010911,
984 0x14fe0814,
985 0x00e04100,
986 0x000013f0,
987 0x0001f61c,
988 0xff0104bd,
989 0x01f61400,
990 0x0104bd00,
991 0x0015f102,
992 0xf6100008,
993 0x04bd0001,
994 0xf000d241,
995 0x10fe0013,
996 0x1031f400,
997 0x38000101,
998 0xbd0001f6,
999/* 0x02db: init_proc */
1000 0x98580f04,
1001 0x16b001f1,
1002 0xfa0bf400,
1003 0xf0b615f9,
1004 0xf20ef458,
1005/* 0x02ec: host_send */
1006 0xcf04b041,
1007 0xa0420011,
1008 0x0022cf04,
1009 0x0bf412a6,
1010 0x071ec42e,
1011 0xb704ee94,
1012 0x980218e0,
1013 0xec9803eb,
1014 0x01ed9802,
1015 0x7e00ee98,
1016 0xb600023f,
1017 0x1ec40110,
1018 0x04b0400f,
1019 0xbd0001f6,
1020 0xc70ef404,
1021/* 0x0328: host_send_done */
1022/* 0x032a: host_recv */
1023 0x494100f8,
1024 0x5413f14e,
1025 0xf4e1a652,
1026/* 0x0336: host_recv_wait */
1027 0xcc41b90b,
1028 0x0011cf04,
1029 0xcf04c842,
1030 0x16f00022,
1031 0xf412a608,
1032 0x23c4ef0b,
1033 0x0434b607,
1034 0x029830b7,
1035 0xb5033bb5,
1036 0x3db5023c,
1037 0x003eb501,
1038 0xf00120b6,
1039 0xc8400f24,
1040 0x0002f604,
1041 0x400204bd,
1042 0x02f60000,
1043 0xf804bd00,
1044/* 0x0379: host_init */
1045 0x00804100,
1046 0xf11014b6,
1047 0x40021815,
1048 0x01f604d0,
1049 0x4104bd00,
1050 0x14b60080,
1051 0x9815f110,
1052 0x04dc4002,
1053 0xbd0001f6,
1054 0x40010104,
1055 0x01f604c4,
1056 0xf804bd00,
1057/* 0x03a9: memx_func_enter */
1058 0x40040600,
1059 0x06f607e0,
1060/* 0x03b3: memx_func_enter_wait */
1061 0x4604bd00,
1062 0x66cf07c0,
1063 0x0464f000,
1064 0x98f70bf4,
1065 0x10b60016,
1066/* 0x03c7: memx_func_leave */
1067 0x0600f804,
1068 0x07e44004,
1069 0xbd0006f6,
1070/* 0x03d1: memx_func_leave_wait */
1071 0x07c04604,
1072 0xf00066cf,
1073 0x1bf40464,
1074/* 0x03df: memx_func_wr32 */
1075 0x9800f8f7,
1076 0x15980016,
1077 0x0810b601,
1078 0x50f960f9,
1079 0xe0fcd0fc,
1080 0x00002e7e,
1081 0x140003f1,
1082 0xa00506fd,
1083 0xb604bd05,
1084 0x1bf40242,
1085/* 0x0407: memx_func_wait */
1086 0x0800f8dd,
1087 0x0088cf2c,
1088 0x98001e98,
1089 0x1c98011d,
1090 0x031b9802,
1091 0x7e1010b6,
1092 0xf8000071,
1093/* 0x0421: memx_func_delay */
1094 0x001e9800,
1095 0x7e0410b6,
1096 0xf800005d,
1097/* 0x042d: memx_exec */
1098 0xf9e0f900,
1099 0xb2c1b2d0,
1100/* 0x0435: memx_exec_next */
1101 0x001398b2,
1102 0x950410b6,
1103 0x30f01034,
1104 0xc835980c,
1105 0x12a655f9,
1106 0xfced1ef4,
1107 0x7ee0fcd0,
1108 0xf800023f,
1109/* 0x0455: memx_info */
1110 0x03544c00,
1111 0x7e08004b,
1112 0xf800023f,
1113/* 0x0461: memx_recv */
1114 0x01d6b000,
1115 0xb0c90bf4,
1116 0x0bf400d6,
1117/* 0x046f: memx_init */
1118 0xf800f8eb,
1119/* 0x0471: perf_recv */
1120/* 0x0473: perf_init */
1121 0xf800f800,
1122/* 0x0475: test_recv */
1123 0x04584100,
1124 0xb60011cf,
1125 0x58400110,
1126 0x0001f604,
1127 0xe7f104bd,
1128 0xe3f1d900,
1129 0x967e134f,
1130 0x00f80001,
1131/* 0x0494: test_init */
1132 0x7e08004e,
1133 0xf8000196,
1134/* 0x049d: idle_recv */
1135/* 0x049f: idle */
1136 0xf400f800,
1137 0x54410031,
1138 0x0011cf04,
1139 0x400110b6,
1140 0x01f60454,
1141/* 0x04b3: idle_loop */
1142 0x0104bd00,
1143 0x0232f458,
1144/* 0x04b8: idle_proc */
1145/* 0x04b8: idle_proc_exec */
1146 0x1eb210f9,
1147 0x0002487e,
1148 0x11f410fc,
1149 0x0231f409,
1150/* 0x04cb: idle_proc_next */
1151 0xb6f00ef4,
1152 0x1fa65810,
1153 0xf4e81bf4,
1154 0x28f4e002,
1155 0xc60ef400,
1156 0x00000000,
1157 0x00000000,
1158 0x00000000,
1159 0x00000000,
1160 0x00000000,
1161 0x00000000,
1162 0x00000000,
1163 0x00000000,
1164 0x00000000,
1165};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
new file mode 100644
index 000000000000..6fde0b89e5aa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#define NVKM_PPWR_CHIPSET GT215
26
27//#define NVKM_FALCON_PC24
28//#define NVKM_FALCON_UNSHIFTED_IO
29//#define NVKM_FALCON_MMIO_UAS
30//#define NVKM_FALCON_MMIO_TRAP
31
32#include "macros.fuc"
33
34.section #nva3_pwr_data
35#define INCLUDE_PROC
36#include "kernel.fuc"
37#include "host.fuc"
38#include "memx.fuc"
39#include "perf.fuc"
40#include "test.fuc"
41#include "idle.fuc"
42#undef INCLUDE_PROC
43
44#define INCLUDE_DATA
45#include "kernel.fuc"
46#include "host.fuc"
47#include "memx.fuc"
48#include "perf.fuc"
49#include "test.fuc"
50#include "idle.fuc"
51#undef INCLUDE_DATA
52.align 256
53
54.section #nva3_pwr_code
55#define INCLUDE_CODE
56#include "kernel.fuc"
57#include "host.fuc"
58#include "memx.fuc"
59#include "perf.fuc"
60#include "test.fuc"
61#include "idle.fuc"
62#undef INCLUDE_CODE
63.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
new file mode 100644
index 000000000000..0fa4d7dcd407
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -0,0 +1,1229 @@
1uint32_t nva3_pwr_data[] = {
2/* 0x0000: proc_kern */
3 0x52544e49,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25/* 0x0058: proc_list_head */
26 0x54534f48,
27 0x00000430,
28 0x000003cd,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x584d454d,
49 0x0000054e,
50 0x00000540,
51 0x00000000,
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x00000000,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00000000,
67 0x00000000,
68 0x00000000,
69 0x00000000,
70 0x46524550,
71 0x00000552,
72 0x00000550,
73 0x00000000,
74 0x00000000,
75 0x00000000,
76 0x00000000,
77 0x00000000,
78 0x00000000,
79 0x00000000,
80 0x00000000,
81 0x00000000,
82 0x00000000,
83 0x00000000,
84 0x00000000,
85 0x00000000,
86 0x00000000,
87 0x00000000,
88 0x00000000,
89 0x00000000,
90 0x00000000,
91 0x00000000,
92 0x54534554,
93 0x0000057b,
94 0x00000554,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x454c4449,
115 0x00000587,
116 0x00000585,
117 0x00000000,
118 0x00000000,
119 0x00000000,
120 0x00000000,
121 0x00000000,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136/* 0x0210: proc_list_tail */
137/* 0x0210: time_prev */
138 0x00000000,
139/* 0x0214: time_next */
140 0x00000000,
141/* 0x0218: fifo_queue */
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174/* 0x0298: rfifo_queue */
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195 0x00000000,
196 0x00000000,
197 0x00000000,
198 0x00000000,
199 0x00000000,
200 0x00000000,
201 0x00000000,
202 0x00000000,
203 0x00000000,
204 0x00000000,
205 0x00000000,
206 0x00000000,
207/* 0x0318: memx_func_head */
208 0x00010000,
209 0x00000000,
210 0x0000046f,
211/* 0x0324: memx_func_next */
212 0x00000001,
213 0x00000000,
214 0x00000496,
215 0x00000002,
216 0x00000002,
217 0x000004b7,
218 0x00040003,
219 0x00000000,
220 0x000004df,
221 0x00010004,
222 0x00000000,
223 0x000004fc,
224/* 0x0354: memx_func_tail */
225/* 0x0354: memx_data_head */
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000,
237 0x00000000,
238 0x00000000,
239 0x00000000,
240 0x00000000,
241 0x00000000,
242 0x00000000,
243 0x00000000,
244 0x00000000,
245 0x00000000,
246 0x00000000,
247 0x00000000,
248 0x00000000,
249 0x00000000,
250 0x00000000,
251 0x00000000,
252 0x00000000,
253 0x00000000,
254 0x00000000,
255 0x00000000,
256 0x00000000,
257 0x00000000,
258 0x00000000,
259 0x00000000,
260 0x00000000,
261 0x00000000,
262 0x00000000,
263 0x00000000,
264 0x00000000,
265 0x00000000,
266 0x00000000,
267 0x00000000,
268 0x00000000,
269 0x00000000,
270 0x00000000,
271 0x00000000,
272 0x00000000,
273 0x00000000,
274 0x00000000,
275 0x00000000,
276 0x00000000,
277 0x00000000,
278 0x00000000,
279 0x00000000,
280 0x00000000,
281 0x00000000,
282 0x00000000,
283 0x00000000,
284 0x00000000,
285 0x00000000,
286 0x00000000,
287 0x00000000,
288 0x00000000,
289 0x00000000,
290 0x00000000,
291 0x00000000,
292 0x00000000,
293 0x00000000,
294 0x00000000,
295 0x00000000,
296 0x00000000,
297 0x00000000,
298 0x00000000,
299 0x00000000,
300 0x00000000,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0x00000000,
305 0x00000000,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0x00000000,
311 0x00000000,
312 0x00000000,
313 0x00000000,
314 0x00000000,
315 0x00000000,
316 0x00000000,
317 0x00000000,
318 0x00000000,
319 0x00000000,
320 0x00000000,
321 0x00000000,
322 0x00000000,
323 0x00000000,
324 0x00000000,
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000,
332 0x00000000,
333 0x00000000,
334 0x00000000,
335 0x00000000,
336 0x00000000,
337 0x00000000,
338 0x00000000,
339 0x00000000,
340 0x00000000,
341 0x00000000,
342 0x00000000,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 0x00000000,
347 0x00000000,
348 0x00000000,
349 0x00000000,
350 0x00000000,
351 0x00000000,
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000,
360 0x00000000,
361 0x00000000,
362 0x00000000,
363 0x00000000,
364 0x00000000,
365 0x00000000,
366 0x00000000,
367 0x00000000,
368 0x00000000,
369 0x00000000,
370 0x00000000,
371 0x00000000,
372 0x00000000,
373 0x00000000,
374 0x00000000,
375 0x00000000,
376 0x00000000,
377 0x00000000,
378 0x00000000,
379 0x00000000,
380 0x00000000,
381 0x00000000,
382 0x00000000,
383 0x00000000,
384 0x00000000,
385 0x00000000,
386 0x00000000,
387 0x00000000,
388 0x00000000,
389 0x00000000,
390 0x00000000,
391 0x00000000,
392 0x00000000,
393 0x00000000,
394 0x00000000,
395 0x00000000,
396 0x00000000,
397 0x00000000,
398 0x00000000,
399 0x00000000,
400 0x00000000,
401 0x00000000,
402 0x00000000,
403 0x00000000,
404 0x00000000,
405 0x00000000,
406 0x00000000,
407 0x00000000,
408 0x00000000,
409 0x00000000,
410 0x00000000,
411 0x00000000,
412 0x00000000,
413 0x00000000,
414 0x00000000,
415 0x00000000,
416 0x00000000,
417 0x00000000,
418 0x00000000,
419 0x00000000,
420 0x00000000,
421 0x00000000,
422 0x00000000,
423 0x00000000,
424 0x00000000,
425 0x00000000,
426 0x00000000,
427 0x00000000,
428 0x00000000,
429 0x00000000,
430 0x00000000,
431 0x00000000,
432 0x00000000,
433 0x00000000,
434 0x00000000,
435 0x00000000,
436 0x00000000,
437 0x00000000,
438 0x00000000,
439 0x00000000,
440 0x00000000,
441 0x00000000,
442 0x00000000,
443 0x00000000,
444 0x00000000,
445 0x00000000,
446 0x00000000,
447 0x00000000,
448 0x00000000,
449 0x00000000,
450 0x00000000,
451 0x00000000,
452 0x00000000,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x00000000,
466 0x00000000,
467 0x00000000,
468 0x00000000,
469 0x00000000,
470 0x00000000,
471 0x00000000,
472 0x00000000,
473 0x00000000,
474 0x00000000,
475 0x00000000,
476 0x00000000,
477 0x00000000,
478 0x00000000,
479 0x00000000,
480 0x00000000,
481 0x00000000,
482 0x00000000,
483 0x00000000,
484 0x00000000,
485 0x00000000,
486 0x00000000,
487 0x00000000,
488 0x00000000,
489 0x00000000,
490 0x00000000,
491 0x00000000,
492 0x00000000,
493 0x00000000,
494 0x00000000,
495 0x00000000,
496 0x00000000,
497 0x00000000,
498 0x00000000,
499 0x00000000,
500 0x00000000,
501 0x00000000,
502 0x00000000,
503 0x00000000,
504 0x00000000,
505 0x00000000,
506 0x00000000,
507 0x00000000,
508 0x00000000,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000,
514 0x00000000,
515 0x00000000,
516 0x00000000,
517 0x00000000,
518 0x00000000,
519 0x00000000,
520 0x00000000,
521 0x00000000,
522 0x00000000,
523 0x00000000,
524 0x00000000,
525 0x00000000,
526 0x00000000,
527 0x00000000,
528 0x00000000,
529 0x00000000,
530 0x00000000,
531 0x00000000,
532 0x00000000,
533 0x00000000,
534 0x00000000,
535 0x00000000,
536 0x00000000,
537 0x00000000,
538 0x00000000,
539 0x00000000,
540 0x00000000,
541 0x00000000,
542 0x00000000,
543 0x00000000,
544 0x00000000,
545 0x00000000,
546 0x00000000,
547 0x00000000,
548 0x00000000,
549 0x00000000,
550 0x00000000,
551 0x00000000,
552 0x00000000,
553 0x00000000,
554 0x00000000,
555 0x00000000,
556 0x00000000,
557 0x00000000,
558 0x00000000,
559 0x00000000,
560 0x00000000,
561 0x00000000,
562 0x00000000,
563 0x00000000,
564 0x00000000,
565 0x00000000,
566 0x00000000,
567 0x00000000,
568 0x00000000,
569 0x00000000,
570 0x00000000,
571 0x00000000,
572 0x00000000,
573 0x00000000,
574 0x00000000,
575 0x00000000,
576 0x00000000,
577 0x00000000,
578 0x00000000,
579 0x00000000,
580 0x00000000,
581 0x00000000,
582 0x00000000,
583 0x00000000,
584 0x00000000,
585 0x00000000,
586 0x00000000,
587 0x00000000,
588 0x00000000,
589 0x00000000,
590 0x00000000,
591 0x00000000,
592 0x00000000,
593 0x00000000,
594 0x00000000,
595 0x00000000,
596 0x00000000,
597 0x00000000,
598 0x00000000,
599 0x00000000,
600 0x00000000,
601 0x00000000,
602 0x00000000,
603 0x00000000,
604 0x00000000,
605 0x00000000,
606 0x00000000,
607 0x00000000,
608 0x00000000,
609 0x00000000,
610 0x00000000,
611 0x00000000,
612 0x00000000,
613 0x00000000,
614 0x00000000,
615 0x00000000,
616 0x00000000,
617 0x00000000,
618 0x00000000,
619 0x00000000,
620 0x00000000,
621 0x00000000,
622 0x00000000,
623 0x00000000,
624 0x00000000,
625 0x00000000,
626 0x00000000,
627 0x00000000,
628 0x00000000,
629 0x00000000,
630 0x00000000,
631 0x00000000,
632 0x00000000,
633 0x00000000,
634 0x00000000,
635 0x00000000,
636 0x00000000,
637 0x00000000,
638 0x00000000,
639 0x00000000,
640 0x00000000,
641 0x00000000,
642 0x00000000,
643 0x00000000,
644 0x00000000,
645 0x00000000,
646 0x00000000,
647 0x00000000,
648 0x00000000,
649 0x00000000,
650 0x00000000,
651 0x00000000,
652 0x00000000,
653 0x00000000,
654 0x00000000,
655 0x00000000,
656 0x00000000,
657 0x00000000,
658 0x00000000,
659 0x00000000,
660 0x00000000,
661 0x00000000,
662 0x00000000,
663 0x00000000,
664 0x00000000,
665 0x00000000,
666 0x00000000,
667 0x00000000,
668 0x00000000,
669 0x00000000,
670 0x00000000,
671 0x00000000,
672 0x00000000,
673 0x00000000,
674 0x00000000,
675 0x00000000,
676 0x00000000,
677 0x00000000,
678 0x00000000,
679 0x00000000,
680 0x00000000,
681 0x00000000,
682 0x00000000,
683 0x00000000,
684 0x00000000,
685 0x00000000,
686 0x00000000,
687 0x00000000,
688 0x00000000,
689 0x00000000,
690 0x00000000,
691 0x00000000,
692 0x00000000,
693 0x00000000,
694 0x00000000,
695 0x00000000,
696 0x00000000,
697 0x00000000,
698 0x00000000,
699 0x00000000,
700 0x00000000,
701 0x00000000,
702 0x00000000,
703 0x00000000,
704 0x00000000,
705 0x00000000,
706 0x00000000,
707 0x00000000,
708 0x00000000,
709 0x00000000,
710 0x00000000,
711 0x00000000,
712 0x00000000,
713 0x00000000,
714 0x00000000,
715 0x00000000,
716 0x00000000,
717 0x00000000,
718 0x00000000,
719 0x00000000,
720 0x00000000,
721 0x00000000,
722 0x00000000,
723 0x00000000,
724 0x00000000,
725 0x00000000,
726 0x00000000,
727 0x00000000,
728 0x00000000,
729 0x00000000,
730 0x00000000,
731 0x00000000,
732 0x00000000,
733 0x00000000,
734 0x00000000,
735 0x00000000,
736 0x00000000,
737 0x00000000,
738/* 0x0b54: memx_data_tail */
739 0x00000000,
740 0x00000000,
741 0x00000000,
742 0x00000000,
743 0x00000000,
744 0x00000000,
745 0x00000000,
746 0x00000000,
747 0x00000000,
748 0x00000000,
749 0x00000000,
750 0x00000000,
751 0x00000000,
752 0x00000000,
753 0x00000000,
754 0x00000000,
755 0x00000000,
756 0x00000000,
757 0x00000000,
758 0x00000000,
759 0x00000000,
760 0x00000000,
761 0x00000000,
762 0x00000000,
763 0x00000000,
764 0x00000000,
765 0x00000000,
766 0x00000000,
767 0x00000000,
768 0x00000000,
769 0x00000000,
770 0x00000000,
771 0x00000000,
772 0x00000000,
773 0x00000000,
774 0x00000000,
775 0x00000000,
776 0x00000000,
777 0x00000000,
778 0x00000000,
779 0x00000000,
780 0x00000000,
781 0x00000000,
782};
783
784uint32_t nva3_pwr_code[] = {
785 0x030d0ef5,
786/* 0x0004: rd32 */
787 0x07a007f1,
788 0xd00604b6,
789 0x04bd000e,
790 0xf001e7f0,
791 0x07f101e3,
792 0x04b607ac,
793 0x000ed006,
794/* 0x0022: rd32_wait */
795 0xe7f104bd,
796 0xe4b607ac,
797 0x00eecf06,
798 0x7000e4f1,
799 0xf1f21bf4,
800 0xb607a4d7,
801 0xddcf06d4,
802/* 0x003f: wr32 */
803 0xf100f800,
804 0xb607a007,
805 0x0ed00604,
806 0xf104bd00,
807 0xb607a407,
808 0x0dd00604,
809 0xf004bd00,
810 0xe5f002e7,
811 0x01e3f0f0,
812 0x07ac07f1,
813 0xd00604b6,
814 0x04bd000e,
815/* 0x006c: wr32_wait */
816 0x07ace7f1,
817 0xcf06e4b6,
818 0xe4f100ee,
819 0x1bf47000,
820/* 0x007f: nsec */
821 0xf000f8f2,
822 0x84b62c87,
823 0x0088cf06,
824/* 0x0088: nsec_loop */
825 0xb62c97f0,
826 0x99cf0694,
827 0x0298bb00,
828 0xf4069eb8,
829 0x00f8f11e,
830/* 0x009c: wait */
831 0xb62c87f0,
832 0x88cf0684,
833/* 0x00a5: wait_loop */
834 0x02eeb900,
835 0xb90421f4,
836 0xadfd02da,
837 0x06acb804,
838 0xf0150bf4,
839 0x94b62c97,
840 0x0099cf06,
841 0xb80298bb,
842 0x1ef4069b,
843/* 0x00c9: wait_done */
844/* 0x00cb: intr_watchdog */
845 0x9800f8df,
846 0x96b003e9,
847 0x2a0bf400,
848 0xbb840a98,
849 0x1cf4029a,
850 0x01d7f00f,
851 0x025421f5,
852 0x0ef494bd,
853/* 0x00e9: intr_watchdog_next_time */
854 0x850a9815,
855 0xf400a6b0,
856 0x9ab8090b,
857 0x061cf406,
858/* 0x00f8: intr_watchdog_next_time_set */
859/* 0x00fb: intr_watchdog_next_proc */
860 0x80850980,
861 0xe0b603e9,
862 0x10e6b158,
863 0xc61bf402,
864/* 0x010a: intr */
865 0x00f900f8,
866 0x80f904bd,
867 0xa0f990f9,
868 0xc0f9b0f9,
869 0xe0f9d0f9,
870 0xf7f0f0f9,
871 0x0188fe00,
872 0x87f180f9,
873 0x84b605d0,
874 0x0088cf06,
875 0xf10180b6,
876 0xb605d007,
877 0x08d00604,
878 0xf004bd00,
879 0x84b60887,
880 0x0088cf06,
881 0xf40289c4,
882 0x0080230b,
883 0x58e7f085,
884 0x98cb21f4,
885 0x96b08509,
886 0x110bf400,
887 0xb63407f0,
888 0x09d00604,
889 0x8004bd00,
890/* 0x016e: intr_skip_watchdog */
891 0x89e48409,
892 0x0bf40800,
893 0x8897f148,
894 0x0694b606,
895 0xc40099cf,
896 0x0bf4029a,
897 0xc0c7f12c,
898 0x06c4b604,
899 0xf900cccf,
900 0x48e7f1c0,
901 0x53e3f14f,
902 0x00d7f054,
903 0x02b921f5,
904 0x07f1c0fc,
905 0x04b604c0,
906 0x000cd006,
907/* 0x01ae: intr_subintr_skip_fifo */
908 0x07f104bd,
909 0x04b60688,
910 0x0009d006,
911/* 0x01ba: intr_skip_subintr */
912 0x89c404bd,
913 0x070bf420,
914 0xffbfa4f1,
915/* 0x01c4: intr_skip_pause */
916 0xf44089c4,
917 0xa4f1070b,
918/* 0x01ce: intr_skip_user0 */
919 0x07f0ffbf,
920 0x0604b604,
921 0xbd0008d0,
922 0xfe80fc04,
923 0xf0fc0088,
924 0xd0fce0fc,
925 0xb0fcc0fc,
926 0x90fca0fc,
927 0x00fc80fc,
928 0xf80032f4,
929/* 0x01f5: timer */
930 0x1032f401,
931 0xb003f898,
932 0x1cf40086,
933 0x03fe8051,
934 0xb63807f0,
935 0x08d00604,
936 0xf004bd00,
937 0x84b60887,
938 0x0088cf06,
939 0xf40284f0,
940 0x87f0261b,
941 0x0684b634,
942 0xb80088cf,
943 0x0bf406e0,
944 0x06e8b809,
945/* 0x0233: timer_reset */
946 0xf01f1ef4,
947 0x04b63407,
948 0x000ed006,
949 0x0e8004bd,
950/* 0x0241: timer_enable */
951 0x0187f084,
952 0xb63807f0,
953 0x08d00604,
954/* 0x024f: timer_done */
955 0xf404bd00,
956 0x00f81031,
957/* 0x0254: send_proc */
958 0x90f980f9,
959 0x9805e898,
960 0x86f004e9,
961 0x0689b804,
962 0xc42a0bf4,
963 0x88940398,
964 0x1880b604,
965 0x98008ebb,
966 0x8a8000fa,
967 0x018d8000,
968 0x80028c80,
969 0x90b6038b,
970 0x0794f001,
971 0xf404e980,
972/* 0x028e: send_done */
973 0x90fc0231,
974 0x00f880fc,
975/* 0x0294: find */
976 0x87f080f9,
977 0x0131f458,
978/* 0x029c: find_loop */
979 0xb8008a98,
980 0x0bf406ae,
981 0x5880b610,
982 0x021086b1,
983 0xf4f01bf4,
984/* 0x02b2: find_done */
985 0x8eb90132,
986 0xf880fc02,
987/* 0x02b9: send */
988 0x9421f500,
989 0x9701f402,
990/* 0x02c2: recv */
991 0xe89800f8,
992 0x04e99805,
993 0xb80132f4,
994 0x0bf40689,
995 0x0389c43d,
996 0xf00180b6,
997 0xe8800784,
998 0x02ea9805,
999 0x8ffef0f9,
1000 0xb9f0f901,
1001 0x999402ef,
1002 0x00e9bb04,
1003 0x9818e0b6,
1004 0xec9803eb,
1005 0x01ed9802,
1006 0xf900ee98,
1007 0xfef0fca5,
1008 0x31f400f8,
1009/* 0x030b: recv_done */
1010 0xf8f0fc01,
1011/* 0x030d: init */
1012 0x0817f100,
1013 0x0614b601,
1014 0xe70011cf,
1015 0xb6010911,
1016 0x14fe0814,
1017 0xe017f100,
1018 0x0013f000,
1019 0xb61c07f0,
1020 0x01d00604,
1021 0xf004bd00,
1022 0x07f0ff17,
1023 0x0604b614,
1024 0xbd0001d0,
1025 0x0217f004,
1026 0x080015f1,
1027 0xb61007f0,
1028 0x01d00604,
1029 0xf104bd00,
1030 0xf0010a17,
1031 0x10fe0013,
1032 0x1031f400,
1033 0xf00117f0,
1034 0x04b63807,
1035 0x0001d006,
1036 0xf7f004bd,
1037/* 0x0371: init_proc */
1038 0x01f19858,
1039 0xf40016b0,
1040 0x15f9fa0b,
1041 0xf458f0b6,
1042/* 0x0382: host_send */
1043 0x17f1f20e,
1044 0x14b604b0,
1045 0x0011cf06,
1046 0x04a027f1,
1047 0xcf0624b6,
1048 0x12b80022,
1049 0x320bf406,
1050 0x94071ec4,
1051 0xe0b704ee,
1052 0xeb980218,
1053 0x02ec9803,
1054 0x9801ed98,
1055 0x21f500ee,
1056 0x10b602b9,
1057 0x0f1ec401,
1058 0x04b007f1,
1059 0xd00604b6,
1060 0x04bd0001,
1061/* 0x03cb: host_send_done */
1062 0xf8ba0ef4,
1063/* 0x03cd: host_recv */
1064 0x4917f100,
1065 0x5413f14e,
1066 0x06e1b852,
1067/* 0x03db: host_recv_wait */
1068 0xf1aa0bf4,
1069 0xb604cc17,
1070 0x11cf0614,
1071 0xc827f100,
1072 0x0624b604,
1073 0xf00022cf,
1074 0x12b80816,
1075 0xe60bf406,
1076 0xb60723c4,
1077 0x30b70434,
1078 0x3b800298,
1079 0x023c8003,
1080 0x80013d80,
1081 0x20b6003e,
1082 0x0f24f001,
1083 0x04c807f1,
1084 0xd00604b6,
1085 0x04bd0002,
1086 0xf04027f0,
1087 0x04b60007,
1088 0x0002d006,
1089 0x00f804bd,
1090/* 0x0430: host_init */
1091 0x008017f1,
1092 0xf11014b6,
1093 0xf1021815,
1094 0xb604d007,
1095 0x01d00604,
1096 0xf104bd00,
1097 0xb6008017,
1098 0x15f11014,
1099 0x07f10298,
1100 0x04b604dc,
1101 0x0001d006,
1102 0x17f004bd,
1103 0xc407f101,
1104 0x0604b604,
1105 0xbd0001d0,
1106/* 0x046f: memx_func_enter */
1107 0xf000f804,
1108 0x07f10467,
1109 0x04b607e0,
1110 0x0006d006,
1111/* 0x047e: memx_func_enter_wait */
1112 0x67f104bd,
1113 0x64b607c0,
1114 0x0066cf06,
1115 0xf40464f0,
1116 0x1698f30b,
1117 0x0410b600,
1118/* 0x0496: memx_func_leave */
1119 0x67f000f8,
1120 0xe407f104,
1121 0x0604b607,
1122 0xbd0006d0,
1123/* 0x04a5: memx_func_leave_wait */
1124 0xc067f104,
1125 0x0664b607,
1126 0xf00066cf,
1127 0x1bf40464,
1128/* 0x04b7: memx_func_wr32 */
1129 0x9800f8f3,
1130 0x15980016,
1131 0x0810b601,
1132 0x50f960f9,
1133 0xe0fcd0fc,
1134 0xf13f21f4,
1135 0xfd140003,
1136 0x05800506,
1137 0xb604bd00,
1138 0x1bf40242,
1139/* 0x04df: memx_func_wait */
1140 0xf000f8dd,
1141 0x84b62c87,
1142 0x0088cf06,
1143 0x98001e98,
1144 0x1c98011d,
1145 0x031b9802,
1146 0xf41010b6,
1147 0x00f89c21,
1148/* 0x04fc: memx_func_delay */
1149 0xb6001e98,
1150 0x21f40410,
1151/* 0x0507: memx_exec */
1152 0xf900f87f,
1153 0xb9d0f9e0,
1154 0xb2b902c1,
1155/* 0x0511: memx_exec_next */
1156 0x00139802,
1157 0x950410b6,
1158 0x30f01034,
1159 0xc835980c,
1160 0x12b855f9,
1161 0xec1ef406,
1162 0xe0fcd0fc,
1163 0x02b921f5,
1164/* 0x0532: memx_info */
1165 0xc7f100f8,
1166 0xb7f10354,
1167 0x21f50800,
1168 0x00f802b9,
1169/* 0x0540: memx_recv */
1170 0xf401d6b0,
1171 0xd6b0c40b,
1172 0xe90bf400,
1173/* 0x054e: memx_init */
1174 0x00f800f8,
1175/* 0x0550: perf_recv */
1176/* 0x0552: perf_init */
1177 0x00f800f8,
1178/* 0x0554: test_recv */
1179 0x05d817f1,
1180 0xcf0614b6,
1181 0x10b60011,
1182 0xd807f101,
1183 0x0604b605,
1184 0xbd0001d0,
1185 0x00e7f104,
1186 0x4fe3f1d9,
1187 0xf521f513,
1188/* 0x057b: test_init */
1189 0xf100f801,
1190 0xf50800e7,
1191 0xf801f521,
1192/* 0x0585: idle_recv */
1193/* 0x0587: idle */
1194 0xf400f800,
1195 0x17f10031,
1196 0x14b605d4,
1197 0x0011cf06,
1198 0xf10110b6,
1199 0xb605d407,
1200 0x01d00604,
1201/* 0x05a3: idle_loop */
1202 0xf004bd00,
1203 0x32f45817,
1204/* 0x05a9: idle_proc */
1205/* 0x05a9: idle_proc_exec */
1206 0xb910f902,
1207 0x21f5021e,
1208 0x10fc02c2,
1209 0xf40911f4,
1210 0x0ef40231,
1211/* 0x05bd: idle_proc_next */
1212 0x5810b6ef,
1213 0xf4061fb8,
1214 0x02f4e61b,
1215 0x0028f4dd,
1216 0x00bb0ef4,
1217 0x00000000,
1218 0x00000000,
1219 0x00000000,
1220 0x00000000,
1221 0x00000000,
1222 0x00000000,
1223 0x00000000,
1224 0x00000000,
1225 0x00000000,
1226 0x00000000,
1227 0x00000000,
1228 0x00000000,
1229};
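
The generated data array above embeds the process table built by the INCLUDE_PROC pass: at proc_list_head (byte offset 0x58, one 0x58-byte slot per process) each entry opens with a four-byte ASCII tag stored least-significant byte first, followed by the init and recv entry points into the code array (0x54534f48 decodes to "HOST" with handlers at 0x0430/0x03cd, matching the host_init/host_recv comments above; then "MEMX", "PERF", "TEST", "IDLE"). A small stand-alone C sketch that decodes those tags; the trimmed table below copies only the tag words from nva3_pwr_data[] and is illustrative, not part of the driver:

/* Sketch: decode the process-name tags embedded in nva3_pwr_data[]. */
#include <stdint.h>
#include <stdio.h>

static const uint32_t tags[] = {
	0x54534f48, /* HOST */
	0x584d454d, /* MEMX */
	0x46524550, /* PERF */
	0x54534554, /* TEST */
	0x454c4449, /* IDLE */
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(tags) / sizeof(tags[0]); i++) {
		uint32_t t = tags[i];
		/* Tags are ASCII, least-significant byte first. */
		printf("process %u: %c%c%c%c\n", i,
		       (int)(t & 0xff), (int)((t >> 8) & 0xff),
		       (int)((t >> 16) & 0xff), (int)(t >> 24));
	}
	return 0;
}
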
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
new file mode 100644
index 000000000000..eaa64da68e36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#define NVKM_PPWR_CHIPSET GF100
26
27//#define NVKM_FALCON_PC24
28//#define NVKM_FALCON_UNSHIFTED_IO
29//#define NVKM_FALCON_MMIO_UAS
30//#define NVKM_FALCON_MMIO_TRAP
31
32#include "macros.fuc"
33
34.section #nvc0_pwr_data
35#define INCLUDE_PROC
36#include "kernel.fuc"
37#include "host.fuc"
38#include "memx.fuc"
39#include "perf.fuc"
40#include "test.fuc"
41#include "idle.fuc"
42#undef INCLUDE_PROC
43
44#define INCLUDE_DATA
45#include "kernel.fuc"
46#include "host.fuc"
47#include "memx.fuc"
48#include "perf.fuc"
49#include "test.fuc"
50#include "idle.fuc"
51#undef INCLUDE_DATA
52.align 256
53
54.section #nvc0_pwr_code
55#define INCLUDE_CODE
56#include "kernel.fuc"
57#include "host.fuc"
58#include "memx.fuc"
59#include "perf.fuc"
60#include "test.fuc"
61#include "idle.fuc"
62#undef INCLUDE_CODE
63.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
new file mode 100644
index 000000000000..82c8e8b88917
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -0,0 +1,1229 @@
1uint32_t nvc0_pwr_data[] = {
2/* 0x0000: proc_kern */
3 0x52544e49,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25/* 0x0058: proc_list_head */
26 0x54534f48,
27 0x00000430,
28 0x000003cd,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x584d454d,
49 0x0000054e,
50 0x00000540,
51 0x00000000,
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x00000000,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00000000,
67 0x00000000,
68 0x00000000,
69 0x00000000,
70 0x46524550,
71 0x00000552,
72 0x00000550,
73 0x00000000,
74 0x00000000,
75 0x00000000,
76 0x00000000,
77 0x00000000,
78 0x00000000,
79 0x00000000,
80 0x00000000,
81 0x00000000,
82 0x00000000,
83 0x00000000,
84 0x00000000,
85 0x00000000,
86 0x00000000,
87 0x00000000,
88 0x00000000,
89 0x00000000,
90 0x00000000,
91 0x00000000,
92 0x54534554,
93 0x0000057b,
94 0x00000554,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x454c4449,
115 0x00000587,
116 0x00000585,
117 0x00000000,
118 0x00000000,
119 0x00000000,
120 0x00000000,
121 0x00000000,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136/* 0x0210: proc_list_tail */
137/* 0x0210: time_prev */
138 0x00000000,
139/* 0x0214: time_next */
140 0x00000000,
141/* 0x0218: fifo_queue */
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174/* 0x0298: rfifo_queue */
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195 0x00000000,
196 0x00000000,
197 0x00000000,
198 0x00000000,
199 0x00000000,
200 0x00000000,
201 0x00000000,
202 0x00000000,
203 0x00000000,
204 0x00000000,
205 0x00000000,
206 0x00000000,
207/* 0x0318: memx_func_head */
208 0x00010000,
209 0x00000000,
210 0x0000046f,
211/* 0x0324: memx_func_next */
212 0x00000001,
213 0x00000000,
214 0x00000496,
215 0x00000002,
216 0x00000002,
217 0x000004b7,
218 0x00040003,
219 0x00000000,
220 0x000004df,
221 0x00010004,
222 0x00000000,
223 0x000004fc,
224/* 0x0354: memx_func_tail */
225/* 0x0354: memx_data_head */
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000,
237 0x00000000,
238 0x00000000,
239 0x00000000,
240 0x00000000,
241 0x00000000,
242 0x00000000,
243 0x00000000,
244 0x00000000,
245 0x00000000,
246 0x00000000,
247 0x00000000,
248 0x00000000,
249 0x00000000,
250 0x00000000,
251 0x00000000,
252 0x00000000,
253 0x00000000,
254 0x00000000,
255 0x00000000,
256 0x00000000,
257 0x00000000,
258 0x00000000,
259 0x00000000,
260 0x00000000,
261 0x00000000,
262 0x00000000,
263 0x00000000,
264 0x00000000,
265 0x00000000,
266 0x00000000,
267 0x00000000,
268 0x00000000,
269 0x00000000,
270 0x00000000,
271 0x00000000,
272 0x00000000,
273 0x00000000,
274 0x00000000,
275 0x00000000,
276 0x00000000,
277 0x00000000,
278 0x00000000,
279 0x00000000,
280 0x00000000,
281 0x00000000,
282 0x00000000,
283 0x00000000,
284 0x00000000,
285 0x00000000,
286 0x00000000,
287 0x00000000,
288 0x00000000,
289 0x00000000,
290 0x00000000,
291 0x00000000,
292 0x00000000,
293 0x00000000,
294 0x00000000,
295 0x00000000,
296 0x00000000,
297 0x00000000,
298 0x00000000,
299 0x00000000,
300 0x00000000,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0x00000000,
305 0x00000000,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0x00000000,
311 0x00000000,
312 0x00000000,
313 0x00000000,
314 0x00000000,
315 0x00000000,
316 0x00000000,
317 0x00000000,
318 0x00000000,
319 0x00000000,
320 0x00000000,
321 0x00000000,
322 0x00000000,
323 0x00000000,
324 0x00000000,
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000,
332 0x00000000,
333 0x00000000,
334 0x00000000,
335 0x00000000,
336 0x00000000,
337 0x00000000,
338 0x00000000,
339 0x00000000,
340 0x00000000,
341 0x00000000,
342 0x00000000,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 0x00000000,
347 0x00000000,
348 0x00000000,
349 0x00000000,
350 0x00000000,
351 0x00000000,
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000,
360 0x00000000,
361 0x00000000,
362 0x00000000,
363 0x00000000,
364 0x00000000,
365 0x00000000,
366 0x00000000,
367 0x00000000,
368 0x00000000,
369 0x00000000,
370 0x00000000,
371 0x00000000,
372 0x00000000,
373 0x00000000,
374 0x00000000,
375 0x00000000,
376 0x00000000,
377 0x00000000,
378 0x00000000,
379 0x00000000,
380 0x00000000,
381 0x00000000,
382 0x00000000,
383 0x00000000,
384 0x00000000,
385 0x00000000,
386 0x00000000,
387 0x00000000,
388 0x00000000,
389 0x00000000,
390 0x00000000,
391 0x00000000,
392 0x00000000,
393 0x00000000,
394 0x00000000,
395 0x00000000,
396 0x00000000,
397 0x00000000,
398 0x00000000,
399 0x00000000,
400 0x00000000,
401 0x00000000,
402 0x00000000,
403 0x00000000,
404 0x00000000,
405 0x00000000,
406 0x00000000,
407 0x00000000,
408 0x00000000,
409 0x00000000,
410 0x00000000,
411 0x00000000,
412 0x00000000,
413 0x00000000,
414 0x00000000,
415 0x00000000,
416 0x00000000,
417 0x00000000,
418 0x00000000,
419 0x00000000,
420 0x00000000,
421 0x00000000,
422 0x00000000,
423 0x00000000,
424 0x00000000,
425 0x00000000,
426 0x00000000,
427 0x00000000,
428 0x00000000,
429 0x00000000,
430 0x00000000,
431 0x00000000,
432 0x00000000,
433 0x00000000,
434 0x00000000,
435 0x00000000,
436 0x00000000,
437 0x00000000,
438 0x00000000,
439 0x00000000,
440 0x00000000,
441 0x00000000,
442 0x00000000,
443 0x00000000,
444 0x00000000,
445 0x00000000,
446 0x00000000,
447 0x00000000,
448 0x00000000,
449 0x00000000,
450 0x00000000,
451 0x00000000,
452 0x00000000,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x00000000,
466 0x00000000,
467 0x00000000,
468 0x00000000,
469 0x00000000,
470 0x00000000,
471 0x00000000,
472 0x00000000,
473 0x00000000,
474 0x00000000,
475 0x00000000,
476 0x00000000,
477 0x00000000,
478 0x00000000,
479 0x00000000,
480 0x00000000,
481 0x00000000,
482 0x00000000,
483 0x00000000,
484 0x00000000,
485 0x00000000,
486 0x00000000,
487 0x00000000,
488 0x00000000,
489 0x00000000,
490 0x00000000,
491 0x00000000,
492 0x00000000,
493 0x00000000,
494 0x00000000,
495 0x00000000,
496 0x00000000,
497 0x00000000,
498 0x00000000,
499 0x00000000,
500 0x00000000,
501 0x00000000,
502 0x00000000,
503 0x00000000,
504 0x00000000,
505 0x00000000,
506 0x00000000,
507 0x00000000,
508 0x00000000,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000,
514 0x00000000,
515 0x00000000,
516 0x00000000,
517 0x00000000,
518 0x00000000,
519 0x00000000,
520 0x00000000,
521 0x00000000,
522 0x00000000,
523 0x00000000,
524 0x00000000,
525 0x00000000,
526 0x00000000,
527 0x00000000,
528 0x00000000,
529 0x00000000,
530 0x00000000,
531 0x00000000,
532 0x00000000,
533 0x00000000,
534 0x00000000,
535 0x00000000,
536 0x00000000,
537 0x00000000,
538 0x00000000,
539 0x00000000,
540 0x00000000,
541 0x00000000,
542 0x00000000,
543 0x00000000,
544 0x00000000,
545 0x00000000,
546 0x00000000,
547 0x00000000,
548 0x00000000,
549 0x00000000,
550 0x00000000,
551 0x00000000,
552 0x00000000,
553 0x00000000,
554 0x00000000,
555 0x00000000,
556 0x00000000,
557 0x00000000,
558 0x00000000,
559 0x00000000,
560 0x00000000,
561 0x00000000,
562 0x00000000,
563 0x00000000,
564 0x00000000,
565 0x00000000,
566 0x00000000,
567 0x00000000,
568 0x00000000,
569 0x00000000,
570 0x00000000,
571 0x00000000,
572 0x00000000,
573 0x00000000,
574 0x00000000,
575 0x00000000,
576 0x00000000,
577 0x00000000,
578 0x00000000,
579 0x00000000,
580 0x00000000,
581 0x00000000,
582 0x00000000,
583 0x00000000,
584 0x00000000,
585 0x00000000,
586 0x00000000,
587 0x00000000,
588 0x00000000,
589 0x00000000,
590 0x00000000,
591 0x00000000,
592 0x00000000,
593 0x00000000,
594 0x00000000,
595 0x00000000,
596 0x00000000,
597 0x00000000,
598 0x00000000,
599 0x00000000,
600 0x00000000,
601 0x00000000,
602 0x00000000,
603 0x00000000,
604 0x00000000,
605 0x00000000,
606 0x00000000,
607 0x00000000,
608 0x00000000,
609 0x00000000,
610 0x00000000,
611 0x00000000,
612 0x00000000,
613 0x00000000,
614 0x00000000,
615 0x00000000,
616 0x00000000,
617 0x00000000,
618 0x00000000,
619 0x00000000,
620 0x00000000,
621 0x00000000,
622 0x00000000,
623 0x00000000,
624 0x00000000,
625 0x00000000,
626 0x00000000,
627 0x00000000,
628 0x00000000,
629 0x00000000,
630 0x00000000,
631 0x00000000,
632 0x00000000,
633 0x00000000,
634 0x00000000,
635 0x00000000,
636 0x00000000,
637 0x00000000,
638 0x00000000,
639 0x00000000,
640 0x00000000,
641 0x00000000,
642 0x00000000,
643 0x00000000,
644 0x00000000,
645 0x00000000,
646 0x00000000,
647 0x00000000,
648 0x00000000,
649 0x00000000,
650 0x00000000,
651 0x00000000,
652 0x00000000,
653 0x00000000,
654 0x00000000,
655 0x00000000,
656 0x00000000,
657 0x00000000,
658 0x00000000,
659 0x00000000,
660 0x00000000,
661 0x00000000,
662 0x00000000,
663 0x00000000,
664 0x00000000,
665 0x00000000,
666 0x00000000,
667 0x00000000,
668 0x00000000,
669 0x00000000,
670 0x00000000,
671 0x00000000,
672 0x00000000,
673 0x00000000,
674 0x00000000,
675 0x00000000,
676 0x00000000,
677 0x00000000,
678 0x00000000,
679 0x00000000,
680 0x00000000,
681 0x00000000,
682 0x00000000,
683 0x00000000,
684 0x00000000,
685 0x00000000,
686 0x00000000,
687 0x00000000,
688 0x00000000,
689 0x00000000,
690 0x00000000,
691 0x00000000,
692 0x00000000,
693 0x00000000,
694 0x00000000,
695 0x00000000,
696 0x00000000,
697 0x00000000,
698 0x00000000,
699 0x00000000,
700 0x00000000,
701 0x00000000,
702 0x00000000,
703 0x00000000,
704 0x00000000,
705 0x00000000,
706 0x00000000,
707 0x00000000,
708 0x00000000,
709 0x00000000,
710 0x00000000,
711 0x00000000,
712 0x00000000,
713 0x00000000,
714 0x00000000,
715 0x00000000,
716 0x00000000,
717 0x00000000,
718 0x00000000,
719 0x00000000,
720 0x00000000,
721 0x00000000,
722 0x00000000,
723 0x00000000,
724 0x00000000,
725 0x00000000,
726 0x00000000,
727 0x00000000,
728 0x00000000,
729 0x00000000,
730 0x00000000,
731 0x00000000,
732 0x00000000,
733 0x00000000,
734 0x00000000,
735 0x00000000,
736 0x00000000,
737 0x00000000,
738/* 0x0b54: memx_data_tail */
739 0x00000000,
740 0x00000000,
741 0x00000000,
742 0x00000000,
743 0x00000000,
744 0x00000000,
745 0x00000000,
746 0x00000000,
747 0x00000000,
748 0x00000000,
749 0x00000000,
750 0x00000000,
751 0x00000000,
752 0x00000000,
753 0x00000000,
754 0x00000000,
755 0x00000000,
756 0x00000000,
757 0x00000000,
758 0x00000000,
759 0x00000000,
760 0x00000000,
761 0x00000000,
762 0x00000000,
763 0x00000000,
764 0x00000000,
765 0x00000000,
766 0x00000000,
767 0x00000000,
768 0x00000000,
769 0x00000000,
770 0x00000000,
771 0x00000000,
772 0x00000000,
773 0x00000000,
774 0x00000000,
775 0x00000000,
776 0x00000000,
777 0x00000000,
778 0x00000000,
779 0x00000000,
780 0x00000000,
781 0x00000000,
782};
783
784uint32_t nvc0_pwr_code[] = {
785 0x030d0ef5,
786/* 0x0004: rd32 */
787 0x07a007f1,
788 0xd00604b6,
789 0x04bd000e,
790 0xf001e7f0,
791 0x07f101e3,
792 0x04b607ac,
793 0x000ed006,
794/* 0x0022: rd32_wait */
795 0xe7f104bd,
796 0xe4b607ac,
797 0x00eecf06,
798 0x7000e4f1,
799 0xf1f21bf4,
800 0xb607a4d7,
801 0xddcf06d4,
802/* 0x003f: wr32 */
803 0xf100f800,
804 0xb607a007,
805 0x0ed00604,
806 0xf104bd00,
807 0xb607a407,
808 0x0dd00604,
809 0xf004bd00,
810 0xe5f002e7,
811 0x01e3f0f0,
812 0x07ac07f1,
813 0xd00604b6,
814 0x04bd000e,
815/* 0x006c: wr32_wait */
816 0x07ace7f1,
817 0xcf06e4b6,
818 0xe4f100ee,
819 0x1bf47000,
820/* 0x007f: nsec */
821 0xf000f8f2,
822 0x84b62c87,
823 0x0088cf06,
824/* 0x0088: nsec_loop */
825 0xb62c97f0,
826 0x99cf0694,
827 0x0298bb00,
828 0xf4069eb8,
829 0x00f8f11e,
830/* 0x009c: wait */
831 0xb62c87f0,
832 0x88cf0684,
833/* 0x00a5: wait_loop */
834 0x02eeb900,
835 0xb90421f4,
836 0xadfd02da,
837 0x06acb804,
838 0xf0150bf4,
839 0x94b62c97,
840 0x0099cf06,
841 0xb80298bb,
842 0x1ef4069b,
843/* 0x00c9: wait_done */
844/* 0x00cb: intr_watchdog */
845 0x9800f8df,
846 0x96b003e9,
847 0x2a0bf400,
848 0xbb840a98,
849 0x1cf4029a,
850 0x01d7f00f,
851 0x025421f5,
852 0x0ef494bd,
853/* 0x00e9: intr_watchdog_next_time */
854 0x850a9815,
855 0xf400a6b0,
856 0x9ab8090b,
857 0x061cf406,
858/* 0x00f8: intr_watchdog_next_time_set */
859/* 0x00fb: intr_watchdog_next_proc */
860 0x80850980,
861 0xe0b603e9,
862 0x10e6b158,
863 0xc61bf402,
864/* 0x010a: intr */
865 0x00f900f8,
866 0x80f904bd,
867 0xa0f990f9,
868 0xc0f9b0f9,
869 0xe0f9d0f9,
870 0xf7f0f0f9,
871 0x0188fe00,
872 0x87f180f9,
873 0x84b605d0,
874 0x0088cf06,
875 0xf10180b6,
876 0xb605d007,
877 0x08d00604,
878 0xf004bd00,
879 0x84b60887,
880 0x0088cf06,
881 0xf40289c4,
882 0x0080230b,
883 0x58e7f085,
884 0x98cb21f4,
885 0x96b08509,
886 0x110bf400,
887 0xb63407f0,
888 0x09d00604,
889 0x8004bd00,
890/* 0x016e: intr_skip_watchdog */
891 0x89e48409,
892 0x0bf40800,
893 0x8897f148,
894 0x0694b606,
895 0xc40099cf,
896 0x0bf4029a,
897 0xc0c7f12c,
898 0x06c4b604,
899 0xf900cccf,
900 0x48e7f1c0,
901 0x53e3f14f,
902 0x00d7f054,
903 0x02b921f5,
904 0x07f1c0fc,
905 0x04b604c0,
906 0x000cd006,
907/* 0x01ae: intr_subintr_skip_fifo */
908 0x07f104bd,
909 0x04b60688,
910 0x0009d006,
911/* 0x01ba: intr_skip_subintr */
912 0x89c404bd,
913 0x070bf420,
914 0xffbfa4f1,
915/* 0x01c4: intr_skip_pause */
916 0xf44089c4,
917 0xa4f1070b,
918/* 0x01ce: intr_skip_user0 */
919 0x07f0ffbf,
920 0x0604b604,
921 0xbd0008d0,
922 0xfe80fc04,
923 0xf0fc0088,
924 0xd0fce0fc,
925 0xb0fcc0fc,
926 0x90fca0fc,
927 0x00fc80fc,
928 0xf80032f4,
929/* 0x01f5: timer */
930 0x1032f401,
931 0xb003f898,
932 0x1cf40086,
933 0x03fe8051,
934 0xb63807f0,
935 0x08d00604,
936 0xf004bd00,
937 0x84b60887,
938 0x0088cf06,
939 0xf40284f0,
940 0x87f0261b,
941 0x0684b634,
942 0xb80088cf,
943 0x0bf406e0,
944 0x06e8b809,
945/* 0x0233: timer_reset */
946 0xf01f1ef4,
947 0x04b63407,
948 0x000ed006,
949 0x0e8004bd,
950/* 0x0241: timer_enable */
951 0x0187f084,
952 0xb63807f0,
953 0x08d00604,
954/* 0x024f: timer_done */
955 0xf404bd00,
956 0x00f81031,
957/* 0x0254: send_proc */
958 0x90f980f9,
959 0x9805e898,
960 0x86f004e9,
961 0x0689b804,
962 0xc42a0bf4,
963 0x88940398,
964 0x1880b604,
965 0x98008ebb,
966 0x8a8000fa,
967 0x018d8000,
968 0x80028c80,
969 0x90b6038b,
970 0x0794f001,
971 0xf404e980,
972/* 0x028e: send_done */
973 0x90fc0231,
974 0x00f880fc,
975/* 0x0294: find */
976 0x87f080f9,
977 0x0131f458,
978/* 0x029c: find_loop */
979 0xb8008a98,
980 0x0bf406ae,
981 0x5880b610,
982 0x021086b1,
983 0xf4f01bf4,
984/* 0x02b2: find_done */
985 0x8eb90132,
986 0xf880fc02,
987/* 0x02b9: send */
988 0x9421f500,
989 0x9701f402,
990/* 0x02c2: recv */
991 0xe89800f8,
992 0x04e99805,
993 0xb80132f4,
994 0x0bf40689,
995 0x0389c43d,
996 0xf00180b6,
997 0xe8800784,
998 0x02ea9805,
999 0x8ffef0f9,
1000 0xb9f0f901,
1001 0x999402ef,
1002 0x00e9bb04,
1003 0x9818e0b6,
1004 0xec9803eb,
1005 0x01ed9802,
1006 0xf900ee98,
1007 0xfef0fca5,
1008 0x31f400f8,
1009/* 0x030b: recv_done */
1010 0xf8f0fc01,
1011/* 0x030d: init */
1012 0x0817f100,
1013 0x0614b601,
1014 0xe70011cf,
1015 0xb6010911,
1016 0x14fe0814,
1017 0xe017f100,
1018 0x0013f000,
1019 0xb61c07f0,
1020 0x01d00604,
1021 0xf004bd00,
1022 0x07f0ff17,
1023 0x0604b614,
1024 0xbd0001d0,
1025 0x0217f004,
1026 0x080015f1,
1027 0xb61007f0,
1028 0x01d00604,
1029 0xf104bd00,
1030 0xf0010a17,
1031 0x10fe0013,
1032 0x1031f400,
1033 0xf00117f0,
1034 0x04b63807,
1035 0x0001d006,
1036 0xf7f004bd,
1037/* 0x0371: init_proc */
1038 0x01f19858,
1039 0xf40016b0,
1040 0x15f9fa0b,
1041 0xf458f0b6,
1042/* 0x0382: host_send */
1043 0x17f1f20e,
1044 0x14b604b0,
1045 0x0011cf06,
1046 0x04a027f1,
1047 0xcf0624b6,
1048 0x12b80022,
1049 0x320bf406,
1050 0x94071ec4,
1051 0xe0b704ee,
1052 0xeb980218,
1053 0x02ec9803,
1054 0x9801ed98,
1055 0x21f500ee,
1056 0x10b602b9,
1057 0x0f1ec401,
1058 0x04b007f1,
1059 0xd00604b6,
1060 0x04bd0001,
1061/* 0x03cb: host_send_done */
1062 0xf8ba0ef4,
1063/* 0x03cd: host_recv */
1064 0x4917f100,
1065 0x5413f14e,
1066 0x06e1b852,
1067/* 0x03db: host_recv_wait */
1068 0xf1aa0bf4,
1069 0xb604cc17,
1070 0x11cf0614,
1071 0xc827f100,
1072 0x0624b604,
1073 0xf00022cf,
1074 0x12b80816,
1075 0xe60bf406,
1076 0xb60723c4,
1077 0x30b70434,
1078 0x3b800298,
1079 0x023c8003,
1080 0x80013d80,
1081 0x20b6003e,
1082 0x0f24f001,
1083 0x04c807f1,
1084 0xd00604b6,
1085 0x04bd0002,
1086 0xf04027f0,
1087 0x04b60007,
1088 0x0002d006,
1089 0x00f804bd,
1090/* 0x0430: host_init */
1091 0x008017f1,
1092 0xf11014b6,
1093 0xf1021815,
1094 0xb604d007,
1095 0x01d00604,
1096 0xf104bd00,
1097 0xb6008017,
1098 0x15f11014,
1099 0x07f10298,
1100 0x04b604dc,
1101 0x0001d006,
1102 0x17f004bd,
1103 0xc407f101,
1104 0x0604b604,
1105 0xbd0001d0,
1106/* 0x046f: memx_func_enter */
1107 0xf000f804,
1108 0x07f10467,
1109 0x04b607e0,
1110 0x0006d006,
1111/* 0x047e: memx_func_enter_wait */
1112 0x67f104bd,
1113 0x64b607c0,
1114 0x0066cf06,
1115 0xf40464f0,
1116 0x1698f30b,
1117 0x0410b600,
1118/* 0x0496: memx_func_leave */
1119 0x67f000f8,
1120 0xe407f104,
1121 0x0604b607,
1122 0xbd0006d0,
1123/* 0x04a5: memx_func_leave_wait */
1124 0xc067f104,
1125 0x0664b607,
1126 0xf00066cf,
1127 0x1bf40464,
1128/* 0x04b7: memx_func_wr32 */
1129 0x9800f8f3,
1130 0x15980016,
1131 0x0810b601,
1132 0x50f960f9,
1133 0xe0fcd0fc,
1134 0xf13f21f4,
1135 0xfd140003,
1136 0x05800506,
1137 0xb604bd00,
1138 0x1bf40242,
1139/* 0x04df: memx_func_wait */
1140 0xf000f8dd,
1141 0x84b62c87,
1142 0x0088cf06,
1143 0x98001e98,
1144 0x1c98011d,
1145 0x031b9802,
1146 0xf41010b6,
1147 0x00f89c21,
1148/* 0x04fc: memx_func_delay */
1149 0xb6001e98,
1150 0x21f40410,
1151/* 0x0507: memx_exec */
1152 0xf900f87f,
1153 0xb9d0f9e0,
1154 0xb2b902c1,
1155/* 0x0511: memx_exec_next */
1156 0x00139802,
1157 0x950410b6,
1158 0x30f01034,
1159 0xc835980c,
1160 0x12b855f9,
1161 0xec1ef406,
1162 0xe0fcd0fc,
1163 0x02b921f5,
1164/* 0x0532: memx_info */
1165 0xc7f100f8,
1166 0xb7f10354,
1167 0x21f50800,
1168 0x00f802b9,
1169/* 0x0540: memx_recv */
1170 0xf401d6b0,
1171 0xd6b0c40b,
1172 0xe90bf400,
1173/* 0x054e: memx_init */
1174 0x00f800f8,
1175/* 0x0550: perf_recv */
1176/* 0x0552: perf_init */
1177 0x00f800f8,
1178/* 0x0554: test_recv */
1179 0x05d817f1,
1180 0xcf0614b6,
1181 0x10b60011,
1182 0xd807f101,
1183 0x0604b605,
1184 0xbd0001d0,
1185 0x00e7f104,
1186 0x4fe3f1d9,
1187 0xf521f513,
1188/* 0x057b: test_init */
1189 0xf100f801,
1190 0xf50800e7,
1191 0xf801f521,
1192/* 0x0585: idle_recv */
1193/* 0x0587: idle */
1194 0xf400f800,
1195 0x17f10031,
1196 0x14b605d4,
1197 0x0011cf06,
1198 0xf10110b6,
1199 0xb605d407,
1200 0x01d00604,
1201/* 0x05a3: idle_loop */
1202 0xf004bd00,
1203 0x32f45817,
1204/* 0x05a9: idle_proc */
1205/* 0x05a9: idle_proc_exec */
1206 0xb910f902,
1207 0x21f5021e,
1208 0x10fc02c2,
1209 0xf40911f4,
1210 0x0ef40231,
1211/* 0x05bd: idle_proc_next */
1212 0x5810b6ef,
1213 0xf4061fb8,
1214 0x02f4e61b,
1215 0x0028f4dd,
1216 0x00bb0ef4,
1217 0x00000000,
1218 0x00000000,
1219 0x00000000,
1220 0x00000000,
1221 0x00000000,
1222 0x00000000,
1223 0x00000000,
1224 0x00000000,
1225 0x00000000,
1226 0x00000000,
1227 0x00000000,
1228 0x00000000,
1229};
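
The memx_func_head table at 0x0318 in these data arrays dispatches memory-reclocking script opcodes to handlers in the code array: the third word of each three-word entry lines up with a code-array comment above (0x046f memx_func_enter, 0x0496 memx_func_leave, 0x04b7 memx_func_wr32, 0x04df memx_func_wait, 0x04fc memx_func_delay). A hedged C sketch of walking that table; the field meanings beyond the handler offset are inferred from those correspondences, not spelled out by this patch:

/* Sketch: walk the memx function table embedded at 0x0318 in the
 * generated data arrays (nva3/nvc0 offsets shown).  Illustrative
 * only; this is not the driver's parser. */
#include <stdint.h>
#include <stdio.h>

struct memx_func {
	uint32_t id;      /* low 16 bits appear to be the function id */
	uint32_t arg;     /* purpose not evident from this patch */
	uint32_t offset;  /* entry point in the code array, in bytes */
};

static const struct memx_func memx_funcs[] = {
	{ 0x00010000, 0x00000000, 0x0000046f }, /* memx_func_enter */
	{ 0x00000001, 0x00000000, 0x00000496 }, /* memx_func_leave */
	{ 0x00000002, 0x00000002, 0x000004b7 }, /* memx_func_wr32  */
	{ 0x00040003, 0x00000000, 0x000004df }, /* memx_func_wait  */
	{ 0x00010004, 0x00000000, 0x000004fc }, /* memx_func_delay */
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(memx_funcs) / sizeof(memx_funcs[0]); i++)
		printf("memx func %u -> code +0x%04x\n",
		       memx_funcs[i].id & 0xffff, memx_funcs[i].offset);
	return 0;
}
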
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
new file mode 100644
index 000000000000..32d65ea254dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#define NVKM_PPWR_CHIPSET GF119
26
27//#define NVKM_FALCON_PC24
28#define NVKM_FALCON_UNSHIFTED_IO
29//#define NVKM_FALCON_MMIO_UAS
30//#define NVKM_FALCON_MMIO_TRAP
31
32#include "macros.fuc"
33
34.section #nvd0_pwr_data
35#define INCLUDE_PROC
36#include "kernel.fuc"
37#include "host.fuc"
38#include "memx.fuc"
39#include "perf.fuc"
40#include "test.fuc"
41#include "idle.fuc"
42#undef INCLUDE_PROC
43
44#define INCLUDE_DATA
45#include "kernel.fuc"
46#include "host.fuc"
47#include "memx.fuc"
48#include "perf.fuc"
49#include "test.fuc"
50#include "idle.fuc"
51#undef INCLUDE_DATA
52.align 256
53
54.section #nvd0_pwr_code
55#define INCLUDE_CODE
56#include "kernel.fuc"
57#include "host.fuc"
58#include "memx.fuc"
59#include "perf.fuc"
60#include "test.fuc"
61#include "idle.fuc"
62#undef INCLUDE_CODE
63.align 256
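
Note that nvd0.fuc, unlike the GT215/GF100 builds above, enables NVKM_FALCON_UNSHIFTED_IO: on the GF119 falcon the I/O port takes register offsets directly, whereas the older builds pre-shift the offset first, which is why the register read/write sequences in nvd0_pwr_code below are noticeably shorter than their nva3/nvc0 counterparts. A minimal sketch of the two conventions; the 6-bit shift amount is an assumption read out of the generated opcode streams, not stated by this patch:

/* Sketch of the two falcon I/O addressing conventions selected by
 * NVKM_FALCON_UNSHIFTED_IO.  The 6-bit shift for the older parts is
 * an assumption inferred from the generated code; the helper is
 * illustrative rather than the driver's implementation. */
#include <stdint.h>

static inline uint32_t falcon_io_addr(uint32_t reg, int unshifted_io)
{
	/* GF119 (nvd0): the I/O port takes the offset as-is.
	 * GT215/GF100 (nva3/nvc0): the offset is shifted down before
	 * it is presented to the I/O port. */
	return unshifted_io ? reg : reg >> 6;
}
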
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
new file mode 100644
index 000000000000..ce65e2a4b789
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -0,0 +1,1229 @@
1uint32_t nvd0_pwr_data[] = {
2/* 0x0000: proc_kern */
3 0x52544e49,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25/* 0x0058: proc_list_head */
26 0x54534f48,
27 0x000003be,
28 0x00000367,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x584d454d,
49 0x000004c4,
50 0x000004b6,
51 0x00000000,
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x00000000,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00000000,
67 0x00000000,
68 0x00000000,
69 0x00000000,
70 0x46524550,
71 0x000004c8,
72 0x000004c6,
73 0x00000000,
74 0x00000000,
75 0x00000000,
76 0x00000000,
77 0x00000000,
78 0x00000000,
79 0x00000000,
80 0x00000000,
81 0x00000000,
82 0x00000000,
83 0x00000000,
84 0x00000000,
85 0x00000000,
86 0x00000000,
87 0x00000000,
88 0x00000000,
89 0x00000000,
90 0x00000000,
91 0x00000000,
92 0x54534554,
93 0x000004eb,
94 0x000004ca,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x454c4449,
115 0x000004f7,
116 0x000004f5,
117 0x00000000,
118 0x00000000,
119 0x00000000,
120 0x00000000,
121 0x00000000,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136/* 0x0210: proc_list_tail */
137/* 0x0210: time_prev */
138 0x00000000,
139/* 0x0214: time_next */
140 0x00000000,
141/* 0x0218: fifo_queue */
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174/* 0x0298: rfifo_queue */
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195 0x00000000,
196 0x00000000,
197 0x00000000,
198 0x00000000,
199 0x00000000,
200 0x00000000,
201 0x00000000,
202 0x00000000,
203 0x00000000,
204 0x00000000,
205 0x00000000,
206 0x00000000,
207/* 0x0318: memx_func_head */
208 0x00010000,
209 0x00000000,
210 0x000003f4,
211/* 0x0324: memx_func_next */
212 0x00000001,
213 0x00000000,
214 0x00000415,
215 0x00000002,
216 0x00000002,
217 0x00000430,
218 0x00040003,
219 0x00000000,
220 0x00000458,
221 0x00010004,
222 0x00000000,
223 0x00000472,
224/* 0x0354: memx_func_tail */
225/* 0x0354: memx_data_head */
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000,
237 0x00000000,
238 0x00000000,
239 0x00000000,
240 0x00000000,
241 0x00000000,
242 0x00000000,
243 0x00000000,
244 0x00000000,
245 0x00000000,
246 0x00000000,
247 0x00000000,
248 0x00000000,
249 0x00000000,
250 0x00000000,
251 0x00000000,
252 0x00000000,
253 0x00000000,
254 0x00000000,
255 0x00000000,
256 0x00000000,
257 0x00000000,
258 0x00000000,
259 0x00000000,
260 0x00000000,
261 0x00000000,
262 0x00000000,
263 0x00000000,
264 0x00000000,
265 0x00000000,
266 0x00000000,
267 0x00000000,
268 0x00000000,
269 0x00000000,
270 0x00000000,
271 0x00000000,
272 0x00000000,
273 0x00000000,
274 0x00000000,
275 0x00000000,
276 0x00000000,
277 0x00000000,
278 0x00000000,
279 0x00000000,
280 0x00000000,
281 0x00000000,
282 0x00000000,
283 0x00000000,
284 0x00000000,
285 0x00000000,
286 0x00000000,
287 0x00000000,
288 0x00000000,
289 0x00000000,
290 0x00000000,
291 0x00000000,
292 0x00000000,
293 0x00000000,
294 0x00000000,
295 0x00000000,
296 0x00000000,
297 0x00000000,
298 0x00000000,
299 0x00000000,
300 0x00000000,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0x00000000,
305 0x00000000,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0x00000000,
311 0x00000000,
312 0x00000000,
313 0x00000000,
314 0x00000000,
315 0x00000000,
316 0x00000000,
317 0x00000000,
318 0x00000000,
319 0x00000000,
320 0x00000000,
321 0x00000000,
322 0x00000000,
323 0x00000000,
324 0x00000000,
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000,
332 0x00000000,
333 0x00000000,
334 0x00000000,
335 0x00000000,
336 0x00000000,
337 0x00000000,
338 0x00000000,
339 0x00000000,
340 0x00000000,
341 0x00000000,
342 0x00000000,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 0x00000000,
347 0x00000000,
348 0x00000000,
349 0x00000000,
350 0x00000000,
351 0x00000000,
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000,
360 0x00000000,
361 0x00000000,
362 0x00000000,
363 0x00000000,
364 0x00000000,
365 0x00000000,
366 0x00000000,
367 0x00000000,
368 0x00000000,
369 0x00000000,
370 0x00000000,
371 0x00000000,
372 0x00000000,
373 0x00000000,
374 0x00000000,
375 0x00000000,
376 0x00000000,
377 0x00000000,
378 0x00000000,
379 0x00000000,
380 0x00000000,
381 0x00000000,
382 0x00000000,
383 0x00000000,
384 0x00000000,
385 0x00000000,
386 0x00000000,
387 0x00000000,
388 0x00000000,
389 0x00000000,
390 0x00000000,
391 0x00000000,
392 0x00000000,
393 0x00000000,
394 0x00000000,
395 0x00000000,
396 0x00000000,
397 0x00000000,
398 0x00000000,
399 0x00000000,
400 0x00000000,
401 0x00000000,
402 0x00000000,
403 0x00000000,
404 0x00000000,
405 0x00000000,
406 0x00000000,
407 0x00000000,
408 0x00000000,
409 0x00000000,
410 0x00000000,
411 0x00000000,
412 0x00000000,
413 0x00000000,
414 0x00000000,
415 0x00000000,
416 0x00000000,
417 0x00000000,
418 0x00000000,
419 0x00000000,
420 0x00000000,
421 0x00000000,
422 0x00000000,
423 0x00000000,
424 0x00000000,
425 0x00000000,
426 0x00000000,
427 0x00000000,
428 0x00000000,
429 0x00000000,
430 0x00000000,
431 0x00000000,
432 0x00000000,
433 0x00000000,
434 0x00000000,
435 0x00000000,
436 0x00000000,
437 0x00000000,
438 0x00000000,
439 0x00000000,
440 0x00000000,
441 0x00000000,
442 0x00000000,
443 0x00000000,
444 0x00000000,
445 0x00000000,
446 0x00000000,
447 0x00000000,
448 0x00000000,
449 0x00000000,
450 0x00000000,
451 0x00000000,
452 0x00000000,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x00000000,
466 0x00000000,
467 0x00000000,
468 0x00000000,
469 0x00000000,
470 0x00000000,
471 0x00000000,
472 0x00000000,
473 0x00000000,
474 0x00000000,
475 0x00000000,
476 0x00000000,
477 0x00000000,
478 0x00000000,
479 0x00000000,
480 0x00000000,
481 0x00000000,
482 0x00000000,
483 0x00000000,
484 0x00000000,
485 0x00000000,
486 0x00000000,
487 0x00000000,
488 0x00000000,
489 0x00000000,
490 0x00000000,
491 0x00000000,
492 0x00000000,
493 0x00000000,
494 0x00000000,
495 0x00000000,
496 0x00000000,
497 0x00000000,
498 0x00000000,
499 0x00000000,
500 0x00000000,
501 0x00000000,
502 0x00000000,
503 0x00000000,
504 0x00000000,
505 0x00000000,
506 0x00000000,
507 0x00000000,
508 0x00000000,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000,
514 0x00000000,
515 0x00000000,
516 0x00000000,
517 0x00000000,
518 0x00000000,
519 0x00000000,
520 0x00000000,
521 0x00000000,
522 0x00000000,
523 0x00000000,
524 0x00000000,
525 0x00000000,
526 0x00000000,
527 0x00000000,
528 0x00000000,
529 0x00000000,
530 0x00000000,
531 0x00000000,
532 0x00000000,
533 0x00000000,
534 0x00000000,
535 0x00000000,
536 0x00000000,
537 0x00000000,
538 0x00000000,
539 0x00000000,
540 0x00000000,
541 0x00000000,
542 0x00000000,
543 0x00000000,
544 0x00000000,
545 0x00000000,
546 0x00000000,
547 0x00000000,
548 0x00000000,
549 0x00000000,
550 0x00000000,
551 0x00000000,
552 0x00000000,
553 0x00000000,
554 0x00000000,
555 0x00000000,
556 0x00000000,
557 0x00000000,
558 0x00000000,
559 0x00000000,
560 0x00000000,
561 0x00000000,
562 0x00000000,
563 0x00000000,
564 0x00000000,
565 0x00000000,
566 0x00000000,
567 0x00000000,
568 0x00000000,
569 0x00000000,
570 0x00000000,
571 0x00000000,
572 0x00000000,
573 0x00000000,
574 0x00000000,
575 0x00000000,
576 0x00000000,
577 0x00000000,
578 0x00000000,
579 0x00000000,
580 0x00000000,
581 0x00000000,
582 0x00000000,
583 0x00000000,
584 0x00000000,
585 0x00000000,
586 0x00000000,
587 0x00000000,
588 0x00000000,
589 0x00000000,
590 0x00000000,
591 0x00000000,
592 0x00000000,
593 0x00000000,
594 0x00000000,
595 0x00000000,
596 0x00000000,
597 0x00000000,
598 0x00000000,
599 0x00000000,
600 0x00000000,
601 0x00000000,
602 0x00000000,
603 0x00000000,
604 0x00000000,
605 0x00000000,
606 0x00000000,
607 0x00000000,
608 0x00000000,
609 0x00000000,
610 0x00000000,
611 0x00000000,
612 0x00000000,
613 0x00000000,
614 0x00000000,
615 0x00000000,
616 0x00000000,
617 0x00000000,
618 0x00000000,
619 0x00000000,
620 0x00000000,
621 0x00000000,
622 0x00000000,
623 0x00000000,
624 0x00000000,
625 0x00000000,
626 0x00000000,
627 0x00000000,
628 0x00000000,
629 0x00000000,
630 0x00000000,
631 0x00000000,
632 0x00000000,
633 0x00000000,
634 0x00000000,
635 0x00000000,
636 0x00000000,
637 0x00000000,
638 0x00000000,
639 0x00000000,
640 0x00000000,
641 0x00000000,
642 0x00000000,
643 0x00000000,
644 0x00000000,
645 0x00000000,
646 0x00000000,
647 0x00000000,
648 0x00000000,
649 0x00000000,
650 0x00000000,
651 0x00000000,
652 0x00000000,
653 0x00000000,
654 0x00000000,
655 0x00000000,
656 0x00000000,
657 0x00000000,
658 0x00000000,
659 0x00000000,
660 0x00000000,
661 0x00000000,
662 0x00000000,
663 0x00000000,
664 0x00000000,
665 0x00000000,
666 0x00000000,
667 0x00000000,
668 0x00000000,
669 0x00000000,
670 0x00000000,
671 0x00000000,
672 0x00000000,
673 0x00000000,
674 0x00000000,
675 0x00000000,
676 0x00000000,
677 0x00000000,
678 0x00000000,
679 0x00000000,
680 0x00000000,
681 0x00000000,
682 0x00000000,
683 0x00000000,
684 0x00000000,
685 0x00000000,
686 0x00000000,
687 0x00000000,
688 0x00000000,
689 0x00000000,
690 0x00000000,
691 0x00000000,
692 0x00000000,
693 0x00000000,
694 0x00000000,
695 0x00000000,
696 0x00000000,
697 0x00000000,
698 0x00000000,
699 0x00000000,
700 0x00000000,
701 0x00000000,
702 0x00000000,
703 0x00000000,
704 0x00000000,
705 0x00000000,
706 0x00000000,
707 0x00000000,
708 0x00000000,
709 0x00000000,
710 0x00000000,
711 0x00000000,
712 0x00000000,
713 0x00000000,
714 0x00000000,
715 0x00000000,
716 0x00000000,
717 0x00000000,
718 0x00000000,
719 0x00000000,
720 0x00000000,
721 0x00000000,
722 0x00000000,
723 0x00000000,
724 0x00000000,
725 0x00000000,
726 0x00000000,
727 0x00000000,
728 0x00000000,
729 0x00000000,
730 0x00000000,
731 0x00000000,
732 0x00000000,
733 0x00000000,
734 0x00000000,
735 0x00000000,
736 0x00000000,
737 0x00000000,
738/* 0x0b54: memx_data_tail */
739 0x00000000,
740 0x00000000,
741 0x00000000,
742 0x00000000,
743 0x00000000,
744 0x00000000,
745 0x00000000,
746 0x00000000,
747 0x00000000,
748 0x00000000,
749 0x00000000,
750 0x00000000,
751 0x00000000,
752 0x00000000,
753 0x00000000,
754 0x00000000,
755 0x00000000,
756 0x00000000,
757 0x00000000,
758 0x00000000,
759 0x00000000,
760 0x00000000,
761 0x00000000,
762 0x00000000,
763 0x00000000,
764 0x00000000,
765 0x00000000,
766 0x00000000,
767 0x00000000,
768 0x00000000,
769 0x00000000,
770 0x00000000,
771 0x00000000,
772 0x00000000,
773 0x00000000,
774 0x00000000,
775 0x00000000,
776 0x00000000,
777 0x00000000,
778 0x00000000,
779 0x00000000,
780 0x00000000,
781 0x00000000,
782};
783
784uint32_t nvd0_pwr_code[] = {
785 0x02bf0ef5,
786/* 0x0004: rd32 */
787 0x07a007f1,
788 0xbd000ed0,
789 0x01e7f004,
790 0xf101e3f0,
791 0xd007ac07,
792 0x04bd000e,
793/* 0x001c: rd32_wait */
794 0x07ace7f1,
795 0xf100eecf,
796 0xf47000e4,
797 0xd7f1f51b,
798 0xddcf07a4,
799/* 0x0033: wr32 */
800 0xf100f800,
801 0xd007a007,
802 0x04bd000e,
803 0x07a407f1,
804 0xbd000dd0,
805 0x02e7f004,
806 0xf0f0e5f0,
807 0x07f101e3,
808 0x0ed007ac,
809/* 0x0057: wr32_wait */
810 0xf104bd00,
811 0xcf07ace7,
812 0xe4f100ee,
813 0x1bf47000,
814/* 0x0067: nsec */
815 0xf000f8f5,
816 0x88cf2c87,
817/* 0x006d: nsec_loop */
818 0x2c97f000,
819 0xbb0099cf,
820 0x9eb80298,
821 0xf41ef406,
822/* 0x007e: wait */
823 0x87f000f8,
824 0x0088cf2c,
825/* 0x0084: wait_loop */
826 0xf402eeb9,
827 0xdab90421,
828 0x04adfd02,
829 0xf406acb8,
830 0x97f0120b,
831 0x0099cf2c,
832 0xb80298bb,
833 0x1ef4069b,
834/* 0x00a5: wait_done */
835/* 0x00a7: intr_watchdog */
836 0x9800f8e2,
837 0x96b003e9,
838 0x2a0bf400,
839 0xbb840a98,
840 0x1cf4029a,
841 0x01d7f00f,
842 0x020621f5,
843 0x0ef494bd,
844/* 0x00c5: intr_watchdog_next_time */
845 0x850a9815,
846 0xf400a6b0,
847 0x9ab8090b,
848 0x061cf406,
849/* 0x00d4: intr_watchdog_next_time_set */
850/* 0x00d7: intr_watchdog_next_proc */
851 0x80850980,
852 0xe0b603e9,
853 0x10e6b158,
854 0xc61bf402,
855/* 0x00e6: intr */
856 0x00f900f8,
857 0x80f904bd,
858 0xa0f990f9,
859 0xc0f9b0f9,
860 0xe0f9d0f9,
861 0xf7f0f0f9,
862 0x0188fe00,
863 0x87f180f9,
864 0x88cf05d0,
865 0x0180b600,
866 0x05d007f1,
867 0xbd0008d0,
868 0x0887f004,
869 0xc40088cf,
870 0x0bf40289,
871 0x85008020,
872 0xf458e7f0,
873 0x0998a721,
874 0x0096b085,
875 0xf00e0bf4,
876 0x09d03407,
877 0x8004bd00,
878/* 0x013e: intr_skip_watchdog */
879 0x89e48409,
880 0x0bf40800,
881 0x8897f13c,
882 0x0099cf06,
883 0xf4029ac4,
884 0xc7f1260b,
885 0xcccf04c0,
886 0xf1c0f900,
887 0xf14f48e7,
888 0xf05453e3,
889 0x21f500d7,
890 0xc0fc026b,
891 0x04c007f1,
892 0xbd000cd0,
893/* 0x0175: intr_subintr_skip_fifo */
894 0x8807f104,
895 0x0009d006,
896/* 0x017e: intr_skip_subintr */
897 0x89c404bd,
898 0x070bf420,
899 0xffbfa4f1,
900/* 0x0188: intr_skip_pause */
901 0xf44089c4,
902 0xa4f1070b,
903/* 0x0192: intr_skip_user0 */
904 0x07f0ffbf,
905 0x0008d004,
906 0x80fc04bd,
907 0xfc0088fe,
908 0xfce0fcf0,
909 0xfcc0fcd0,
910 0xfca0fcb0,
911 0xfc80fc90,
912 0x0032f400,
913/* 0x01b6: timer */
914 0x32f401f8,
915 0x03f89810,
916 0xf40086b0,
917 0xfe80421c,
918 0x3807f003,
919 0xbd0008d0,
920 0x0887f004,
921 0xf00088cf,
922 0x1bf40284,
923 0x3487f020,
924 0xb80088cf,
925 0x0bf406e0,
926 0x06e8b809,
927/* 0x01eb: timer_reset */
928 0xf0191ef4,
929 0x0ed03407,
930 0x8004bd00,
931/* 0x01f6: timer_enable */
932 0x87f0840e,
933 0x3807f001,
934 0xbd0008d0,
935/* 0x0201: timer_done */
936 0x1031f404,
937/* 0x0206: send_proc */
938 0x80f900f8,
939 0xe89890f9,
940 0x04e99805,
941 0xb80486f0,
942 0x0bf40689,
943 0x0398c42a,
944 0xb6048894,
945 0x8ebb1880,
946 0x00fa9800,
947 0x80008a80,
948 0x8c80018d,
949 0x038b8002,
950 0xf00190b6,
951 0xe9800794,
952 0x0231f404,
953/* 0x0240: send_done */
954 0x80fc90fc,
955/* 0x0246: find */
956 0x80f900f8,
957 0xf45887f0,
958/* 0x024e: find_loop */
959 0x8a980131,
960 0x06aeb800,
961 0xb6100bf4,
962 0x86b15880,
963 0x1bf40210,
964 0x0132f4f0,
965/* 0x0264: find_done */
966 0xfc028eb9,
967/* 0x026b: send */
968 0xf500f880,
969 0xf4024621,
970 0x00f89701,
971/* 0x0274: recv */
972 0x9805e898,
973 0x32f404e9,
974 0x0689b801,
975 0xc43d0bf4,
976 0x80b60389,
977 0x0784f001,
978 0x9805e880,
979 0xf0f902ea,
980 0xf9018ffe,
981 0x02efb9f0,
982 0xbb049994,
983 0xe0b600e9,
984 0x03eb9818,
985 0x9802ec98,
986 0xee9801ed,
987 0xfca5f900,
988 0x00f8fef0,
989 0xfc0131f4,
990/* 0x02bd: recv_done */
991/* 0x02bf: init */
992 0xf100f8f0,
993 0xcf010817,
994 0x11e70011,
995 0x14b60109,
996 0x0014fe08,
997 0x00e017f1,
998 0xf00013f0,
999 0x01d01c07,
1000 0xf004bd00,
1001 0x07f0ff17,
1002 0x0001d014,
1003 0x17f004bd,
1004 0x0015f102,
1005 0x1007f008,
1006 0xbd0001d0,
1007 0xe617f104,
1008 0x0013f000,
1009 0xf40010fe,
1010 0x17f01031,
1011 0x3807f001,
1012 0xbd0001d0,
1013 0x58f7f004,
1014/* 0x0314: init_proc */
1015 0xb001f198,
1016 0x0bf40016,
1017 0xb615f9fa,
1018 0x0ef458f0,
1019/* 0x0325: host_send */
1020 0xb017f1f2,
1021 0x0011cf04,
1022 0x04a027f1,
1023 0xb80022cf,
1024 0x0bf40612,
1025 0x071ec42f,
1026 0xb704ee94,
1027 0x980218e0,
1028 0xec9803eb,
1029 0x01ed9802,
1030 0xf500ee98,
1031 0xb6026b21,
1032 0x1ec40110,
1033 0xb007f10f,
1034 0x0001d004,
1035 0x0ef404bd,
1036/* 0x0365: host_send_done */
1037/* 0x0367: host_recv */
1038 0xf100f8c3,
1039 0xf14e4917,
1040 0xb8525413,
1041 0x0bf406e1,
1042/* 0x0375: host_recv_wait */
1043 0xcc17f1b3,
1044 0x0011cf04,
1045 0x04c827f1,
1046 0xf00022cf,
1047 0x12b80816,
1048 0xec0bf406,
1049 0xb60723c4,
1050 0x30b70434,
1051 0x3b800298,
1052 0x023c8003,
1053 0x80013d80,
1054 0x20b6003e,
1055 0x0f24f001,
1056 0x04c807f1,
1057 0xbd0002d0,
1058 0x4027f004,
1059 0xd00007f0,
1060 0x04bd0002,
1061/* 0x03be: host_init */
1062 0x17f100f8,
1063 0x14b60080,
1064 0x1815f110,
1065 0xd007f102,
1066 0x0001d004,
1067 0x17f104bd,
1068 0x14b60080,
1069 0x9815f110,
1070 0xdc07f102,
1071 0x0001d004,
1072 0x17f004bd,
1073 0xc407f101,
1074 0x0001d004,
1075 0x00f804bd,
1076/* 0x03f4: memx_func_enter */
1077 0xf10467f0,
1078 0xd007e007,
1079 0x04bd0006,
1080/* 0x0400: memx_func_enter_wait */
1081 0x07c067f1,
1082 0xf00066cf,
1083 0x0bf40464,
1084 0x001698f6,
1085 0xf80410b6,
1086/* 0x0415: memx_func_leave */
1087 0x0467f000,
1088 0x07e407f1,
1089 0xbd0006d0,
1090/* 0x0421: memx_func_leave_wait */
1091 0xc067f104,
1092 0x0066cf07,
1093 0xf40464f0,
1094 0x00f8f61b,
1095/* 0x0430: memx_func_wr32 */
1096 0x98001698,
1097 0x10b60115,
1098 0xf960f908,
1099 0xfcd0fc50,
1100 0x3321f4e0,
1101 0x140003f1,
1102 0x800506fd,
1103 0x04bd0005,
1104 0xf40242b6,
1105 0x00f8dd1b,
1106/* 0x0458: memx_func_wait */
1107 0xcf2c87f0,
1108 0x1e980088,
1109 0x011d9800,
1110 0x98021c98,
1111 0x10b6031b,
1112 0x7e21f410,
1113/* 0x0472: memx_func_delay */
1114 0x1e9800f8,
1115 0x0410b600,
1116 0xf86721f4,
1117/* 0x047d: memx_exec */
1118 0xf9e0f900,
1119 0x02c1b9d0,
1120/* 0x0487: memx_exec_next */
1121 0x9802b2b9,
1122 0x10b60013,
1123 0x10349504,
1124 0x980c30f0,
1125 0x55f9c835,
1126 0xf40612b8,
1127 0xd0fcec1e,
1128 0x21f5e0fc,
1129 0x00f8026b,
1130/* 0x04a8: memx_info */
1131 0x0354c7f1,
1132 0x0800b7f1,
1133 0x026b21f5,
1134/* 0x04b6: memx_recv */
1135 0xd6b000f8,
1136 0xc40bf401,
1137 0xf400d6b0,
1138 0x00f8e90b,
1139/* 0x04c4: memx_init */
1140/* 0x04c6: perf_recv */
1141 0x00f800f8,
1142/* 0x04c8: perf_init */
1143/* 0x04ca: test_recv */
1144 0x17f100f8,
1145 0x11cf05d8,
1146 0x0110b600,
1147 0x05d807f1,
1148 0xbd0001d0,
1149 0x00e7f104,
1150 0x4fe3f1d9,
1151 0xb621f513,
1152/* 0x04eb: test_init */
1153 0xf100f801,
1154 0xf50800e7,
1155 0xf801b621,
1156/* 0x04f5: idle_recv */
1157/* 0x04f7: idle */
1158 0xf400f800,
1159 0x17f10031,
1160 0x11cf05d4,
1161 0x0110b600,
1162 0x05d407f1,
1163 0xbd0001d0,
1164/* 0x050d: idle_loop */
1165 0x5817f004,
1166/* 0x0513: idle_proc */
1167/* 0x0513: idle_proc_exec */
1168 0xf90232f4,
1169 0x021eb910,
1170 0x027421f5,
1171 0x11f410fc,
1172 0x0231f409,
1173/* 0x0527: idle_proc_next */
1174 0xb6ef0ef4,
1175 0x1fb85810,
1176 0xe61bf406,
1177 0xf4dd02f4,
1178 0x0ef40028,
1179 0x000000c1,
1180 0x00000000,
1181 0x00000000,
1182 0x00000000,
1183 0x00000000,
1184 0x00000000,
1185 0x00000000,
1186 0x00000000,
1187 0x00000000,
1188 0x00000000,
1189 0x00000000,
1190 0x00000000,
1191 0x00000000,
1192 0x00000000,
1193 0x00000000,
1194 0x00000000,
1195 0x00000000,
1196 0x00000000,
1197 0x00000000,
1198 0x00000000,
1199 0x00000000,
1200 0x00000000,
1201 0x00000000,
1202 0x00000000,
1203 0x00000000,
1204 0x00000000,
1205 0x00000000,
1206 0x00000000,
1207 0x00000000,
1208 0x00000000,
1209 0x00000000,
1210 0x00000000,
1211 0x00000000,
1212 0x00000000,
1213 0x00000000,
1214 0x00000000,
1215 0x00000000,
1216 0x00000000,
1217 0x00000000,
1218 0x00000000,
1219 0x00000000,
1220 0x00000000,
1221 0x00000000,
1222 0x00000000,
1223 0x00000000,
1224 0x00000000,
1225 0x00000000,
1226 0x00000000,
1227 0x00000000,
1228 0x00000000,
1229};
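
Note that nvd0_pwr_data[]/nvd0_pwr_code[] above, like the other fuc.h blobs in
this series, are assembler output: the .fuc sources are built with envyas and
the resulting arrays committed, so they are regenerated rather than edited by
hand.
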
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
new file mode 100644
index 000000000000..5fb0cccc6c64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
@@ -0,0 +1,27 @@
1#ifndef __NVKM_PWR_OS_H__
2#define __NVKM_PWR_OS_H__
3
4/* Process names */
5#define PROC_KERN 0x52544e49
6#define PROC_IDLE 0x454c4449
7#define PROC_HOST 0x54534f48
8#define PROC_MEMX 0x584d454d
9#define PROC_PERF 0x46524550
10#define PROC_TEST 0x54534554
11
12/* KERN: message identifiers */
13#define KMSG_FIFO 0x00000000
14#define KMSG_ALARM 0x00000001
15
16/* MEMX: message identifiers */
17#define MEMX_MSG_INFO 0
18#define MEMX_MSG_EXEC 1
19
20/* MEMX: script opcode definitions */
21#define MEMX_ENTER 0
22#define MEMX_LEAVE 1
23#define MEMX_WR32 2
24#define MEMX_WAIT 3
25#define MEMX_DELAY 4
26
27#endif
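
Each PROC_* value above is a four-byte ASCII tag stored little-endian: most
spell their own name ("IDLE", "HOST", "MEMX", "PERF", "TEST"), while PROC_KERN
spells "INTR". A minimal decoding sketch (illustrative C, not from this series):

	#include <stdio.h>
	#include <stdint.h>

	/* recover the ASCII tag from a PROC_* identifier (LSB first) */
	static void proc_name(uint32_t id, char out[5])
	{
		int i;
		for (i = 0; i < 4; i++)
			out[i] = (id >> (8 * i)) & 0xff;
		out[4] = '\0';
	}

	int main(void)
	{
		char tag[5];
		proc_name(0x584d454d, tag);
		printf("%s\n", tag); /* prints "MEMX" */
		return 0;
	}
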
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc
new file mode 100644
index 000000000000..38eadf705cbf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc
@@ -0,0 +1,57 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifdef INCLUDE_PROC
26process(PROC_PERF, #perf_init, #perf_recv)
27#endif
28
29/******************************************************************************
30 * PERF data segment
31 *****************************************************************************/
32#ifdef INCLUDE_DATA
33#endif
34
35/******************************************************************************
36 * PERF code segment
37 *****************************************************************************/
38#ifdef INCLUDE_CODE
39
40// description
41//
42// $r15 - current (perf)
43// $r14 - sender process name
44// $r13 - message
45// $r12 - data0
46// $r11 - data1
47// $r0 - zero
48perf_recv:
49 ret
50
51// description
52//
53// $r15 - current (perf)
54// $r0 - zero
55perf_init:
56 ret
57#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc
new file mode 100644
index 000000000000..0c3a71bf5459
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc
@@ -0,0 +1,64 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifdef INCLUDE_PROC
26process(PROC_TEST, #test_init, #test_recv)
27#endif
28
29/******************************************************************************
30 * TEST data segment
31 *****************************************************************************/
32#ifdef INCLUDE_DATA
33#endif
34
35/******************************************************************************
36 * TEST code segment
37 *****************************************************************************/
38#ifdef INCLUDE_CODE
39// description
40//
41// $r15 - current (test)
42// $r14 - sender process name
43// $r13 - message
44// $r12 - data0
45// $r11 - data1
46// $r0 - zero
47test_recv:
48 nv_iord($r1, NV_PPWR_DSCRATCH(2))
49 add b32 $r1 1
50 nv_iowr(NV_PPWR_DSCRATCH(2), $r1)
51 mov $r14 -0x2700 /* 0xd900, envyas grrr! */
52 sethi $r14 0x134f0000
53 call(timer)
54 ret
55
56// description
57//
58// $r15 - current (test)
59// $r0 - zero
60test_init:
61 mov $r14 0x800
62 call(timer)
63 ret
64#endif
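
On the "envyas grrr!" workaround in test_recv above: the assembler
sign-extends mov's 16-bit immediate, so the low half of the constant
0x134fd900 has to be written as -0x2700 (0x10000 - 0xd900 = 0x2700), with
sethi filling in the upper half afterwards. A quick check of the arithmetic
(illustrative C, assuming sethi replaces only the high 16 bits, which matches
how the value is built here):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t r14 = (uint16_t)(int16_t)-0x2700; /* mov: low half = 0xd900 */
		r14 = 0x134f0000 | (r14 & 0xffff);         /* sethi: high half */
		assert(r14 == 0x134fd900);
		return 0;
	}
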
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
new file mode 100644
index 000000000000..03de3107d29f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
@@ -0,0 +1,121 @@
1#ifndef __NVKM_PWR_MEMX_H__
2#define __NVKM_PWR_MEMX_H__
3
4#include <subdev/pwr.h>
5#include <subdev/pwr/fuc/os.h>
6
7struct nouveau_memx {
8 struct nouveau_pwr *ppwr;
9 u32 base;
10 u32 size;
11 struct {
12 u32 mthd;
13 u32 size;
14 u32 data[64];
15 } c;
16};
17
18static void
19memx_out(struct nouveau_memx *memx)
20{
21 struct nouveau_pwr *ppwr = memx->ppwr;
22 int i;
23
24 if (memx->c.size) {
25 nv_wr32(ppwr, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
26 for (i = 0; i < memx->c.size; i++)
27 nv_wr32(ppwr, 0x10a1c4, memx->c.data[i]);
28 memx->c.size = 0;
29 }
30}
31
32static void
33memx_cmd(struct nouveau_memx *memx, u32 mthd, u32 size, u32 data[])
34{
35 if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
36 (memx->c.size && memx->c.mthd != mthd))
37 memx_out(memx);
38 memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
39 memx->c.size += size;
40 memx->c.mthd = mthd;
41}
42
43int
44nouveau_memx_init(struct nouveau_pwr *ppwr, struct nouveau_memx **pmemx)
45{
46 struct nouveau_memx *memx;
47 u32 reply[2];
48 int ret;
49
50 ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO, 0, 0);
51 if (ret)
52 return ret;
53
54 memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
55 if (!memx)
56 return -ENOMEM;
57 memx->ppwr = ppwr;
58 memx->base = reply[0];
59 memx->size = reply[1];
60
61 /* acquire data segment access */
62 do {
63 nv_wr32(ppwr, 0x10a580, 0x00000003);
64 } while (nv_rd32(ppwr, 0x10a580) != 0x00000003);
65 nv_wr32(ppwr, 0x10a1c0, 0x01000000 | memx->base);
66 nv_wr32(ppwr, 0x10a1c4, 0x00010000 | MEMX_ENTER);
67 nv_wr32(ppwr, 0x10a1c4, 0x00000000);
68 return 0;
69}
70
71int
72nouveau_memx_fini(struct nouveau_memx **pmemx, bool exec)
73{
74 struct nouveau_memx *memx = *pmemx;
75 struct nouveau_pwr *ppwr = memx->ppwr;
76 u32 finish, reply[2];
77
78 /* flush the cache... */
79 memx_out(memx);
80
81 /* release data segment access */
82 nv_wr32(ppwr, 0x10a1c4, 0x00000000 | MEMX_LEAVE);
83 finish = nv_rd32(ppwr, 0x10a1c0) & 0x00ffffff;
84 nv_wr32(ppwr, 0x10a580, 0x00000000);
85
86 /* call MEMX process to execute the script, and wait for reply */
87 if (exec) {
88 ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_EXEC,
89 memx->base, finish);
90 }
91
92 kfree(memx);
93 return 0;
94}
95
96void
97nouveau_memx_wr32(struct nouveau_memx *memx, u32 addr, u32 data)
98{
99 nv_debug(memx->ppwr, "R[%06x] = 0x%08x\n", addr, data);
100 memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
101}
102
103void
104nouveau_memx_wait(struct nouveau_memx *memx,
105 u32 addr, u32 mask, u32 data, u32 nsec)
106{
107 nv_debug(memx->ppwr, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
108 addr, mask, data, nsec);
109 memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, ~mask, data, nsec });
110 memx_out(memx); /* fuc can't handle multiple */
111}
112
113void
114nouveau_memx_nsec(struct nouveau_memx *memx, u32 nsec)
115{
116 nv_debug(memx->ppwr, " DELAY = %d ns\n", nsec);
117 memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
118 memx_out(memx); /* fuc can't handle multiple */
119}
120
121#endif
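
A minimal sketch of driving this interface from a caller (the register address
and poll values are hypothetical; error handling trimmed):

	static int example_memx_script(struct nouveau_pwr *ppwr)
	{
		struct nouveau_memx *memx;
		int ret;

		ret = nouveau_memx_init(ppwr, &memx);  /* enter, map data segment */
		if (ret)
			return ret;

		/* WR32s are batched by memx_cmd(); WAIT/DELAY flush immediately,
		 * since the fuc side can't handle multiple of those per packet */
		nouveau_memx_wr32(memx, 0x100200, 0x00000001);
		nouveau_memx_wait(memx, 0x100200, 0x80000000, 0x80000000, 2000);
		nouveau_memx_nsec(memx, 1000);

		return nouveau_memx_fini(&memx, true); /* leave, ask PMU to exec */
	}
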
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
new file mode 100644
index 000000000000..52c85414866a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/pwr.h>
26
27#include "fuc/nv108.fuc.h"
28
29struct nv108_pwr_priv {
30 struct nouveau_pwr base;
31};
32
33static int
34nv108_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nv108_pwr_priv *priv;
39 int ret;
40
41 ret = nouveau_pwr_create(parent, engine, oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.code.data = nv108_pwr_code;
47 priv->base.code.size = sizeof(nv108_pwr_code);
48 priv->base.data.data = nv108_pwr_data;
49 priv->base.data.size = sizeof(nv108_pwr_data);
50 return 0;
51}
52
53struct nouveau_oclass
54nv108_pwr_oclass = {
55 .handle = NV_SUBDEV(PWR, 0x00),
56 .ofuncs = &(struct nouveau_ofuncs) {
57 .ctor = nv108_pwr_ctor,
58 .dtor = _nouveau_pwr_dtor,
59 .init = _nouveau_pwr_init,
60 .fini = _nouveau_pwr_fini,
61 },
62};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
new file mode 100644
index 000000000000..c132b7ca9747
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/pwr.h>
26
27#include "fuc/nva3.fuc.h"
28
29struct nva3_pwr_priv {
30 struct nouveau_pwr base;
31};
32
33static int
34nva3_pwr_init(struct nouveau_object *object)
35{
36 struct nva3_pwr_priv *priv = (void *)object;
37 nv_mask(priv, 0x022210, 0x00000001, 0x00000000);
38 nv_mask(priv, 0x022210, 0x00000001, 0x00000001);
39 return nouveau_pwr_init(&priv->base);
40}
41
42static int
43nva3_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
44 struct nouveau_oclass *oclass, void *data, u32 size,
45 struct nouveau_object **pobject)
46{
47 struct nva3_pwr_priv *priv;
48 int ret;
49
50 ret = nouveau_pwr_create(parent, engine, oclass, &priv);
51 *pobject = nv_object(priv);
52 if (ret)
53 return ret;
54
55 priv->base.code.data = nva3_pwr_code;
56 priv->base.code.size = sizeof(nva3_pwr_code);
57 priv->base.data.data = nva3_pwr_data;
58 priv->base.data.size = sizeof(nva3_pwr_data);
59 return 0;
60}
61
62struct nouveau_oclass
63nva3_pwr_oclass = {
64 .handle = NV_SUBDEV(PWR, 0xa3),
65 .ofuncs = &(struct nouveau_ofuncs) {
66 .ctor = nva3_pwr_ctor,
67 .dtor = _nouveau_pwr_dtor,
68 .init = nva3_pwr_init,
69 .fini = _nouveau_pwr_fini,
70 },
71};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
new file mode 100644
index 000000000000..495f6857428d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/pwr.h>
26
27#include "fuc/nvc0.fuc.h"
28
29struct nvc0_pwr_priv {
30 struct nouveau_pwr base;
31};
32
33static int
34nvc0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nvc0_pwr_priv *priv;
39 int ret;
40
41 ret = nouveau_pwr_create(parent, engine, oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.code.data = nvc0_pwr_code;
47 priv->base.code.size = sizeof(nvc0_pwr_code);
48 priv->base.data.data = nvc0_pwr_data;
49 priv->base.data.size = sizeof(nvc0_pwr_data);
50 return 0;
51}
52
53struct nouveau_oclass
54nvc0_pwr_oclass = {
55 .handle = NV_SUBDEV(PWR, 0xc0),
56 .ofuncs = &(struct nouveau_ofuncs) {
57 .ctor = nvc0_pwr_ctor,
58 .dtor = _nouveau_pwr_dtor,
59 .init = _nouveau_pwr_init,
60 .fini = _nouveau_pwr_fini,
61 },
62};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
new file mode 100644
index 000000000000..043aa142fe82
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/pwr.h>
26
27#include "fuc/nvd0.fuc.h"
28
29struct nvd0_pwr_priv {
30 struct nouveau_pwr base;
31};
32
33static int
34nvd0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nvd0_pwr_priv *priv;
39 int ret;
40
41 ret = nouveau_pwr_create(parent, engine, oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.code.data = nvd0_pwr_code;
47 priv->base.code.size = sizeof(nvd0_pwr_code);
48 priv->base.data.data = nvd0_pwr_data;
49 priv->base.data.size = sizeof(nvd0_pwr_data);
50 return 0;
51}
52
53struct nouveau_oclass
54nvd0_pwr_oclass = {
55 .handle = NV_SUBDEV(PWR, 0xd0),
56 .ofuncs = &(struct nouveau_ofuncs) {
57 .ctor = nvd0_pwr_ctor,
58 .dtor = _nouveau_pwr_dtor,
59 .init = _nouveau_pwr_init,
60 .fini = _nouveau_pwr_fini,
61 },
62};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index f1de7a9c572b..80e584a1bd1c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -92,10 +92,11 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
92 struct nouveau_timer *ptimer = nouveau_timer(therm); 92 struct nouveau_timer *ptimer = nouveau_timer(therm);
93 struct nouveau_therm_priv *priv = (void *)therm; 93 struct nouveau_therm_priv *priv = (void *)therm;
94 unsigned long flags; 94 unsigned long flags;
95 int duty; 95 bool immd = true;
96 bool poll = true;
97 int duty = -1;
96 98
97 spin_lock_irqsave(&priv->lock, flags); 99 spin_lock_irqsave(&priv->lock, flags);
98 nv_debug(therm, "FAN speed check\n");
99 if (mode < 0) 100 if (mode < 0)
100 mode = priv->mode; 101 mode = priv->mode;
101 priv->mode = mode; 102 priv->mode = mode;
@@ -106,28 +107,49 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
106 duty = nouveau_therm_fan_get(therm); 107 duty = nouveau_therm_fan_get(therm);
107 if (duty < 0) 108 if (duty < 0)
108 duty = 100; 109 duty = 100;
110 poll = false;
109 break; 111 break;
110 case NOUVEAU_THERM_CTRL_AUTO: 112 case NOUVEAU_THERM_CTRL_AUTO:
111 if (priv->fan->bios.nr_fan_trip) 113 if (priv->fan->bios.nr_fan_trip) {
112 duty = nouveau_therm_update_trip(therm); 114 duty = nouveau_therm_update_trip(therm);
113 else 115 } else
116 if (priv->fan->bios.linear_min_temp ||
117 priv->fan->bios.linear_max_temp) {
114 duty = nouveau_therm_update_linear(therm); 118 duty = nouveau_therm_update_linear(therm);
119 } else {
120 if (priv->cstate)
121 duty = priv->cstate;
122 poll = false;
123 }
124 immd = false;
115 break; 125 break;
116 case NOUVEAU_THERM_CTRL_NONE: 126 case NOUVEAU_THERM_CTRL_NONE:
117 default: 127 default:
118 ptimer->alarm_cancel(ptimer, &priv->alarm); 128 ptimer->alarm_cancel(ptimer, &priv->alarm);
119 goto done; 129 poll = false;
120 } 130 }
121 131
122 nv_debug(therm, "FAN target request: %d%%\n", duty); 132 if (list_empty(&priv->alarm.head) && poll)
123 nouveau_therm_fan_set(therm, (mode != NOUVEAU_THERM_CTRL_AUTO), duty);
124
125done:
126 if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO))
127 ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm); 133 ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
128 else if (!list_empty(&priv->alarm.head))
129 nv_debug(therm, "therm fan alarm list is not empty\n");
130 spin_unlock_irqrestore(&priv->lock, flags); 134 spin_unlock_irqrestore(&priv->lock, flags);
135
136 if (duty >= 0) {
137 nv_debug(therm, "FAN target request: %d%%\n", duty);
138 nouveau_therm_fan_set(therm, immd, duty);
139 }
140}
141
142int
143nouveau_therm_cstate(struct nouveau_therm *ptherm, int fan, int dir)
144{
145 struct nouveau_therm_priv *priv = (void *)ptherm;
146 if (!dir || (dir < 0 && fan < priv->cstate) ||
147 (dir > 0 && fan > priv->cstate)) {
148 nv_debug(ptherm, "default fan speed -> %d%%\n", fan);
149 priv->cstate = fan;
150 nouveau_therm_update(ptherm, -1);
151 }
152 return 0;
131} 153}
132 154
133static void 155static void
@@ -149,14 +171,15 @@ nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode)
149 "automatic" 171 "automatic"
150 }; 172 };
151 173
152 /* The default PDAEMON ucode interferes with fan management */ 174 /* The default PPWR ucode on fermi interferes with fan management */
153 if ((mode >= ARRAY_SIZE(name)) || 175 if ((mode >= ARRAY_SIZE(name)) ||
154 (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0)) 176 (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0 &&
177 !nouveau_subdev(device, NVDEV_SUBDEV_PWR)))
155 return -EINVAL; 178 return -EINVAL;
156 179
157 /* do not allow automatic fan management if the thermal sensor is 180 /* do not allow automatic fan management if the thermal sensor is
158 * not available */ 181 * not available */
159 if (priv->mode == 2 && therm->temp_get(therm) < 0) 182 if (priv->mode == NOUVEAU_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
160 return -EINVAL; 183 return -EINVAL;
161 184
162 if (priv->mode == mode) 185 if (priv->mode == mode)
@@ -335,7 +358,7 @@ nouveau_therm_preinit(struct nouveau_therm *therm)
335 nouveau_therm_ic_ctor(therm); 358 nouveau_therm_ic_ctor(therm);
336 nouveau_therm_fan_ctor(therm); 359 nouveau_therm_fan_ctor(therm);
337 360
338 nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE); 361 nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
339 nouveau_therm_sensor_preinit(therm); 362 nouveau_therm_sensor_preinit(therm);
340 return 0; 363 return 0;
341} 364}
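
The nouveau_therm_cstate() hook added above gives the clock code a way to move
the default fan-speed floor: dir == 0 applies `fan` unconditionally, dir < 0
accepts only a decrease, dir > 0 only an increase. Illustrative calls,
assuming a valid therm pointer:

	nouveau_therm_cstate(therm, 60, +1); /* apply 60% only if above the current floor */
	nouveau_therm_cstate(therm, 30, -1); /* apply 30% only if below the current floor */
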
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index 39f47b950ad1..95f6129eeede 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -185,8 +185,11 @@ nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
185 priv->fan->bios.max_duty = 100; 185 priv->fan->bios.max_duty = 100;
186 priv->fan->bios.bump_period = 500; 186 priv->fan->bios.bump_period = 500;
187 priv->fan->bios.slow_down_period = 2000; 187 priv->fan->bios.slow_down_period = 2000;
188/*XXX: talk to mupuf */
189#if 0
188 priv->fan->bios.linear_min_temp = 40; 190 priv->fan->bios.linear_min_temp = 40;
189 priv->fan->bios.linear_max_temp = 85; 191 priv->fan->bios.linear_max_temp = 85;
192#endif
190} 193}
191 194
192static void 195static void
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
index e601773ee475..f69dab11f720 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
@@ -97,6 +97,13 @@ nouveau_fantog_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
97{ 97{
98 struct nouveau_therm_priv *tpriv = (void *)therm; 98 struct nouveau_therm_priv *tpriv = (void *)therm;
99 struct nouveau_fantog_priv *priv; 99 struct nouveau_fantog_priv *priv;
100 int ret;
101
102 if (therm->pwm_ctrl) {
103 ret = therm->pwm_ctrl(therm, func->line, false);
104 if (ret)
105 return ret;
106 }
100 107
101 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 108 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
102 tpriv->fan = &priv->base; 109 tpriv->fan = &priv->base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index 8b3adec5fbb1..13b850076443 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -55,28 +55,28 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
55 return true; 55 return true;
56} 56}
57 57
58static struct i2c_board_info 58static struct nouveau_i2c_board_info
59nv_board_infos[] = { 59nv_board_infos[] = {
60 { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 60 { { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 },
61 { I2C_BOARD_INFO("w83781d", 0x2d) }, 61 { { I2C_BOARD_INFO("w83781d", 0x2d) }, 0 },
62 { I2C_BOARD_INFO("adt7473", 0x2e) }, 62 { { I2C_BOARD_INFO("adt7473", 0x2e) }, 20 },
63 { I2C_BOARD_INFO("adt7473", 0x2d) }, 63 { { I2C_BOARD_INFO("adt7473", 0x2d) }, 20 },
64 { I2C_BOARD_INFO("adt7473", 0x2c) }, 64 { { I2C_BOARD_INFO("adt7473", 0x2c) }, 20 },
65 { I2C_BOARD_INFO("f75375", 0x2e) }, 65 { { I2C_BOARD_INFO("f75375", 0x2e) }, 0 },
66 { I2C_BOARD_INFO("lm99", 0x4c) }, 66 { { I2C_BOARD_INFO("lm99", 0x4c) }, 0 },
67 { I2C_BOARD_INFO("lm90", 0x4c) }, 67 { { I2C_BOARD_INFO("lm90", 0x4c) }, 0 },
68 { I2C_BOARD_INFO("lm90", 0x4d) }, 68 { { I2C_BOARD_INFO("lm90", 0x4d) }, 0 },
69 { I2C_BOARD_INFO("adm1021", 0x18) }, 69 { { I2C_BOARD_INFO("adm1021", 0x18) }, 0 },
70 { I2C_BOARD_INFO("adm1021", 0x19) }, 70 { { I2C_BOARD_INFO("adm1021", 0x19) }, 0 },
71 { I2C_BOARD_INFO("adm1021", 0x1a) }, 71 { { I2C_BOARD_INFO("adm1021", 0x1a) }, 0 },
72 { I2C_BOARD_INFO("adm1021", 0x29) }, 72 { { I2C_BOARD_INFO("adm1021", 0x29) }, 0 },
73 { I2C_BOARD_INFO("adm1021", 0x2a) }, 73 { { I2C_BOARD_INFO("adm1021", 0x2a) }, 0 },
74 { I2C_BOARD_INFO("adm1021", 0x2b) }, 74 { { I2C_BOARD_INFO("adm1021", 0x2b) }, 0 },
75 { I2C_BOARD_INFO("adm1021", 0x4c) }, 75 { { I2C_BOARD_INFO("adm1021", 0x4c) }, 0 },
76 { I2C_BOARD_INFO("adm1021", 0x4d) }, 76 { { I2C_BOARD_INFO("adm1021", 0x4d) }, 0 },
77 { I2C_BOARD_INFO("adm1021", 0x4e) }, 77 { { I2C_BOARD_INFO("adm1021", 0x4e) }, 0 },
78 { I2C_BOARD_INFO("lm63", 0x18) }, 78 { { I2C_BOARD_INFO("lm63", 0x18) }, 0 },
79 { I2C_BOARD_INFO("lm63", 0x4e) }, 79 { { I2C_BOARD_INFO("lm63", 0x4e) }, 0 },
80 { } 80 { }
81}; 81};
82 82
@@ -89,9 +89,9 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
89 struct nvbios_extdev_func extdev_entry; 89 struct nvbios_extdev_func extdev_entry;
90 90
91 if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) { 91 if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
92 struct i2c_board_info board[] = { 92 struct nouveau_i2c_board_info board[] = {
93 { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 93 { { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 0},
94 { } 94 { }
95 }; 95 };
96 96
97 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 97 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
@@ -101,9 +101,9 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
101 } 101 }
102 102
103 if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) { 103 if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
104 struct i2c_board_info board[] = { 104 struct nouveau_i2c_board_info board[] = {
105 { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 105 { { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 20 },
106 { } 106 { }
107 }; 107 };
108 108
109 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 109 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
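
The trailing field added to every entry here (0, or 20 for the adt7473) is
carried by the new nouveau_i2c_board_info wrapper around i2c_board_info; from
the values used it looks like a per-device bus-speed tweak applied while
probing. A sketch of the shape the initialisers imply (the `udelay` field name
is an assumption):

	struct nouveau_i2c_board_info {
		struct i2c_board_info dev; /* embedded kernel board info */
		u8 udelay;                 /* bus-clock tweak; 0 = default */
	};
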
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
index 42ba633ccff7..1d15c52fad0c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
@@ -126,7 +126,7 @@ nv84_therm_intr(struct nouveau_subdev *subdev)
126 126
127 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags); 127 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
128 128
129 intr = nv_rd32(therm, 0x20100); 129 intr = nv_rd32(therm, 0x20100) & 0x3ff;
130 130
131 /* THRS_4: downclock */ 131 /* THRS_4: downclock */
132 if (intr & 0x002) { 132 if (intr & 0x002) {
@@ -209,6 +209,19 @@ nv84_therm_ctor(struct nouveau_object *parent,
209 return nouveau_therm_preinit(&priv->base.base); 209 return nouveau_therm_preinit(&priv->base.base);
210} 210}
211 211
212int
213nv84_therm_fini(struct nouveau_object *object, bool suspend)
214{
215 /* Disable PTherm IRQs */
216 nv_wr32(object, 0x20000, 0x00000000);
217
218 /* ACK all PTherm IRQs */
219 nv_wr32(object, 0x20100, 0xffffffff);
220 nv_wr32(object, 0x1100, 0x10000); /* PBUS */
221
222 return _nouveau_therm_fini(object, suspend);
223}
224
212struct nouveau_oclass 225struct nouveau_oclass
213nv84_therm_oclass = { 226nv84_therm_oclass = {
214 .handle = NV_SUBDEV(THERM, 0x84), 227 .handle = NV_SUBDEV(THERM, 0x84),
@@ -216,6 +229,6 @@ nv84_therm_oclass = {
216 .ctor = nv84_therm_ctor, 229 .ctor = nv84_therm_ctor,
217 .dtor = _nouveau_therm_dtor, 230 .dtor = _nouveau_therm_dtor,
218 .init = _nouveau_therm_init, 231 .init = _nouveau_therm_init,
219 .fini = _nouveau_therm_fini, 232 .fini = nv84_therm_fini,
220 }, 233 },
221}; 234};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
index d11a7c400813..3b2c4580098b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -94,6 +94,6 @@ nva3_therm_oclass = {
94 .ctor = nva3_therm_ctor, 94 .ctor = nva3_therm_ctor,
95 .dtor = _nouveau_therm_dtor, 95 .dtor = _nouveau_therm_dtor,
96 .init = nva3_therm_init, 96 .init = nva3_therm_init,
97 .fini = _nouveau_therm_fini, 97 .fini = nv84_therm_fini,
98 }, 98 },
99}; 99};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index 54c28bdc4204..4dd4f81ae873 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -148,6 +148,6 @@ nvd0_therm_oclass = {
148 .ctor = nvd0_therm_ctor, 148 .ctor = nvd0_therm_ctor,
149 .dtor = _nouveau_therm_dtor, 149 .dtor = _nouveau_therm_dtor,
150 .init = nvd0_therm_init, 150 .init = nvd0_therm_init,
151 .fini = _nouveau_therm_fini, 151 .fini = nv84_therm_fini,
152 }, 152 },
153}; 153};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index dd38529262fb..96f8f95693ce 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -76,6 +76,7 @@ struct nouveau_therm_priv {
76 spinlock_t lock; 76 spinlock_t lock;
77 struct nouveau_therm_trip_point *last_trip; 77 struct nouveau_therm_trip_point *last_trip;
78 int mode; 78 int mode;
79 int cstate;
79 int suspend; 80 int suspend;
80 81
81 /* bios */ 82 /* bios */
@@ -144,6 +145,7 @@ int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
144int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32); 145int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
145int nv50_fan_pwm_clock(struct nouveau_therm *); 146int nv50_fan_pwm_clock(struct nouveau_therm *);
146int nv84_temp_get(struct nouveau_therm *therm); 147int nv84_temp_get(struct nouveau_therm *therm);
148int nv84_therm_fini(struct nouveau_object *object, bool suspend);
147 149
148int nva3_therm_fan_sense(struct nouveau_therm *); 150int nva3_therm_fan_sense(struct nouveau_therm *);
149 151
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index b80a33011b93..cfde9eb44ad0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -180,8 +180,6 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
180 180
181 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags); 181 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
182 182
183 nv_debug(therm, "polling the internal temperature\n");
184
185 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost, 183 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
186 NOUVEAU_THERM_THRS_FANBOOST); 184 NOUVEAU_THERM_THRS_FANBOOST);
187 185
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index 57711ecb566c..c0bdd10358d7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -119,16 +119,8 @@ nv04_timer_alarm_cancel(struct nouveau_timer *ptimer,
119{ 119{
120 struct nv04_timer_priv *priv = (void *)ptimer; 120 struct nv04_timer_priv *priv = (void *)ptimer;
121 unsigned long flags; 121 unsigned long flags;
122
123 /* avoid deleting an entry while the alarm intr is running */
124 spin_lock_irqsave(&priv->lock, flags); 122 spin_lock_irqsave(&priv->lock, flags);
125 123 list_del_init(&alarm->head);
126 /* delete the alarm from the list */
127 list_del(&alarm->head);
128
129 /* reset the head so as list_empty returns 1 */
130 INIT_LIST_HEAD(&alarm->head);
131
132 spin_unlock_irqrestore(&priv->lock, flags); 124 spin_unlock_irqrestore(&priv->lock, flags);
133} 125}
134 126
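
list_del_init() is the stock list.h helper that folds together the two steps
this hunk removes; it unlinks the entry and re-points it at itself, so a later
list_empty(&alarm->head) still reads true:

	static inline void list_del_init(struct list_head *entry)
	{
		__list_del_entry(entry); /* unlink from the pending-alarm list */
		INIT_LIST_HEAD(entry);   /* self-point: list_empty() == true */
	}
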
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/base.c b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
new file mode 100644
index 000000000000..32794a999106
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
@@ -0,0 +1,198 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/volt.h>
26
27#include <subdev/bios.h>
28#include <subdev/bios/vmap.h>
29#include <subdev/bios/volt.h>
30
31static int
32nouveau_volt_get(struct nouveau_volt *volt)
33{
34 if (volt->vid_get) {
35 int ret = volt->vid_get(volt), i;
36 if (ret >= 0) {
37 for (i = 0; i < volt->vid_nr; i++) {
38 if (volt->vid[i].vid == ret)
39 return volt->vid[i].uv;
40 }
41 ret = -EINVAL;
42 }
43 return ret;
44 }
45 return -ENODEV;
46}
47
48static int
49nouveau_volt_set(struct nouveau_volt *volt, u32 uv)
50{
51 if (volt->vid_set) {
52 int i, ret = -EINVAL;
53 for (i = 0; i < volt->vid_nr; i++) {
54 if (volt->vid[i].uv == uv) {
55 ret = volt->vid_set(volt, volt->vid[i].vid);
56 nv_debug(volt, "set %duv: %d\n", uv, ret);
57 break;
58 }
59 }
60 return ret;
61 }
62 return -ENODEV;
63}
64
65static int
66nouveau_volt_map(struct nouveau_volt *volt, u8 id)
67{
68 struct nouveau_bios *bios = nouveau_bios(volt);
69 struct nvbios_vmap_entry info;
70 u8 ver, len;
71 u16 vmap;
72
73 vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
74 if (vmap) {
75 if (info.link != 0xff) {
76 int ret = nouveau_volt_map(volt, info.link);
77 if (ret < 0)
78 return ret;
79 info.min += ret;
80 }
81 return info.min;
82 }
83
84 return id ? id * 10000 : -ENODEV;
85}
86
87static int
88nouveau_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
89{
90 int ret = nouveau_volt_map(volt, id);
91 if (ret >= 0) {
92 int prev = nouveau_volt_get(volt);
93 if (!condition || prev < 0 ||
94 (condition < 0 && ret < prev) ||
95 (condition > 0 && ret > prev)) {
96 ret = nouveau_volt_set(volt, ret);
97 } else {
98 ret = 0;
99 }
100 }
101 return ret;
102}
103
104int
105_nouveau_volt_init(struct nouveau_object *object)
106{
107 struct nouveau_volt *volt = (void *)object;
108 int ret;
109
110 ret = nouveau_subdev_init(&volt->base);
111 if (ret)
112 return ret;
113
114 ret = volt->get(volt);
115 if (ret < 0) {
116 if (ret != -ENODEV)
117 nv_debug(volt, "current voltage unknown\n");
118 return 0;
119 }
120
121 nv_info(volt, "GPU voltage: %duv\n", ret);
122 return 0;
123}
124
125void
126_nouveau_volt_dtor(struct nouveau_object *object)
127{
128 struct nouveau_volt *volt = (void *)object;
129 nouveau_subdev_destroy(&volt->base);
130}
131
132int
133nouveau_volt_create_(struct nouveau_object *parent,
134 struct nouveau_object *engine,
135 struct nouveau_oclass *oclass, int length, void **pobject)
136{
137 struct nouveau_bios *bios = nouveau_bios(parent);
138 struct nouveau_volt *volt;
139 struct nvbios_volt_entry ivid;
140 struct nvbios_volt info;
141 u8 ver, hdr, cnt, len;
142 u16 data;
143 int ret, i;
144
145 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "VOLT",
146 "voltage", length, pobject);
147 volt = *pobject;
148 if (ret)
149 return ret;
150
151 volt->get = nouveau_volt_get;
152 volt->set = nouveau_volt_set;
153 volt->set_id = nouveau_volt_set_id;
154
155 data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
156 if (data && info.vidmask && info.base && info.step) {
157 for (i = 0; i < info.vidmask + 1; i++) {
158 if (info.base >= info.min &&
159 info.base <= info.max) {
160 volt->vid[volt->vid_nr].uv = info.base;
161 volt->vid[volt->vid_nr].vid = i;
162 volt->vid_nr++;
163 }
164 info.base += info.step;
165 }
166 volt->vid_mask = info.vidmask;
167 } else
168 if (data && info.vidmask) {
169 for (i = 0; i < cnt; i++) {
170 data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
171 &ivid);
172 if (data) {
173 volt->vid[volt->vid_nr].uv = ivid.voltage;
174 volt->vid[volt->vid_nr].vid = ivid.vid;
175 volt->vid_nr++;
176 }
177 }
178 volt->vid_mask = info.vidmask;
179 }
180
181 if (volt->vid_nr) {
182 for (i = 0; i < volt->vid_nr; i++) {
183 nv_debug(volt, "VID %02x: %duv\n",
184 volt->vid[i].vid, volt->vid[i].uv);
185 }
186
187 /*XXX: this is an assumption.. there probably exists boards
188 * out there with i2c-connected voltage controllers too..
189 */
190 ret = nouveau_voltgpio_init(volt);
191 if (ret == 0) {
192 volt->vid_get = nouveau_voltgpio_get;
193 volt->vid_set = nouveau_voltgpio_set;
194 }
195 }
196
197 return ret;
198}
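
A minimal usage sketch for the new subdev interface (entry id 5 is arbitrary):
volt->get()/volt->set() work in microvolts, while volt->set_id() resolves a
BIOS voltage-map entry and applies it subject to the same direction convention
as the therm cstate hook (0 = always, <0 = only if lower, >0 = only if higher):

	static void example_volt_usage(struct nouveau_volt *volt)
	{
		int uv = volt->get(volt);    /* current voltage in uV, or -errno */
		if (uv >= 0)
			volt->set(volt, uv); /* set an exact uV level from the VID table */

		volt->set_id(volt, 5, +1);   /* map entry 5, apply only if it raises */
	}
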
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c
new file mode 100644
index 000000000000..755fa91bcd09
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/volt.h>
26#include <subdev/gpio.h>
27#include <subdev/bios/gpio.h>
28
29static const u8 tags[] = {
30 DCB_GPIO_VID0, DCB_GPIO_VID1, DCB_GPIO_VID2, DCB_GPIO_VID3,
31 DCB_GPIO_VID4, DCB_GPIO_VID5, DCB_GPIO_VID6, DCB_GPIO_VID7,
32};
33
34int
35nouveau_voltgpio_get(struct nouveau_volt *volt)
36{
37 struct nouveau_gpio *gpio = nouveau_gpio(volt);
38 u8 vid = 0;
39 int i;
40
41 for (i = 0; i < ARRAY_SIZE(tags); i++) {
42 if (volt->vid_mask & (1 << i)) {
43 int ret = gpio->get(gpio, 0, tags[i], 0xff);
44 if (ret < 0)
45 return ret;
46 vid |= ret << i;
47 }
48 }
49
50 return vid;
51}
52
53int
54nouveau_voltgpio_set(struct nouveau_volt *volt, u8 vid)
55{
56 struct nouveau_gpio *gpio = nouveau_gpio(volt);
57 int i;
58
59 for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) {
60 if (volt->vid_mask & (1 << i)) {
61 int ret = gpio->set(gpio, 0, tags[i], 0xff, vid & 1);
62 if (ret < 0)
63 return ret;
64 }
65 }
66
67 return 0;
68}
69
70int
71nouveau_voltgpio_init(struct nouveau_volt *volt)
72{
73 struct nouveau_gpio *gpio = nouveau_gpio(volt);
74 struct dcb_gpio_func func;
75 int i;
76
77 /* check we have gpio function info for each vid bit. on some
78 * boards (ie. nvs295) the vid mask has more bits than there
79 * are valid gpio functions... from traces, nvidia appear to
80 * just touch the existing ones, so let's mask off the invalid
81 * bits and continue with life
82 */
83 for (i = 0; i < ARRAY_SIZE(tags); i++) {
84 if (volt->vid_mask & (1 << i)) {
85 int ret = gpio->find(gpio, 0, tags[i], 0xff, &func);
86 if (ret) {
87 if (ret != -ENOENT)
88 return ret;
89 nv_debug(volt, "VID bit %d has no GPIO\n", i);
90 volt->vid_mask &= ~(1 << i);
91 }
92 }
93 }
94
95 return 0;
96}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c
new file mode 100644
index 000000000000..87d5358376a6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/volt.h>
26
27struct nv40_volt_priv {
28 struct nouveau_volt base;
29};
30
31static int
32nv40_volt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
33 struct nouveau_oclass *oclass, void *data, u32 size,
34 struct nouveau_object **pobject)
35{
36 struct nv40_volt_priv *priv;
37 int ret;
38
39 ret = nouveau_volt_create(parent, engine, oclass, &priv);
40 *pobject = nv_object(priv);
41 if (ret)
42 return ret;
43
44 return 0;
45}
46
47struct nouveau_oclass
48nv40_volt_oclass = {
49 .handle = NV_SUBDEV(VOLT, 0x40),
50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = nv40_volt_ctor,
52 .dtor = _nouveau_volt_dtor,
53 .init = _nouveau_volt_init,
54 .fini = _nouveau_volt_fini,
55 },
56};
diff --git a/drivers/gpu/drm/nouveau/dispnv04/Makefile b/drivers/gpu/drm/nouveau/dispnv04/Makefile
index ea3f5b8a0f95..424a489d0f03 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/Makefile
+++ b/drivers/gpu/drm/nouveau/dispnv04/Makefile
@@ -5,6 +5,7 @@ nouveau-y += dispnv04/dac.o
5nouveau-y += dispnv04/dfp.o 5nouveau-y += dispnv04/dfp.o
6nouveau-y += dispnv04/disp.o 6nouveau-y += dispnv04/disp.o
7nouveau-y += dispnv04/hw.o 7nouveau-y += dispnv04/hw.o
8nouveau-y += dispnv04/overlay.o
8nouveau-y += dispnv04/tvmodesnv17.o 9nouveau-y += dispnv04/tvmodesnv17.o
9nouveau-y += dispnv04/tvnv04.o 10nouveau-y += dispnv04/tvnv04.o
10nouveau-y += dispnv04/tvnv17.o 11nouveau-y += dispnv04/tvnv17.o
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 2e70462883e8..2a15b98b4d2b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -210,8 +210,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
210 sim_data.nvclk_khz = NVClk; 210 sim_data.nvclk_khz = NVClk;
211 sim_data.bpp = bpp; 211 sim_data.bpp = bpp;
212 sim_data.two_heads = nv_two_heads(dev); 212 sim_data.two_heads = nv_two_heads(dev);
213 if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ || 213 if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
214 (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) { 214 (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
215 uint32_t type; 215 uint32_t type;
216 216
217 pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type); 217 pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
@@ -256,8 +256,8 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
 
 	if (nv_device(drm->device)->card_type < NV_20)
 		nv04_update_arb(dev, vclk, bpp, burst, lwm);
-	else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
-		 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
+	else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
+		 (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
 		*burst = 128;
 		*lwm = 0x0480;
 	} else
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index d4fbf11360fe..0e3270c3ffd2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -326,8 +326,6 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
 		regp->MiscOutReg = 0x23;	/* +hsync +vsync */
 	}
 
-	regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
-
 	/*
 	 * Time Sequencer
 	 */
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 93dd23ff0093..936a71c59080 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -490,10 +490,10 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
 	/* BIOS scripts usually take care of the backlight, thanks
 	 * Apple for your consistency.
 	 */
-	if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 ||
-	    dev->pci_device == 0x0189 || dev->pci_device == 0x0329) {
+	if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
+	    dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
 		if (mode == DRM_MODE_DPMS_ON) {
-			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
+			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
 			nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
 		} else {
 			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
@@ -625,13 +625,15 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
 	struct nouveau_i2c_port *port = i2c->find(i2c, 2);
-	struct i2c_board_info info[] = {
+	struct nouveau_i2c_board_info info[] = {
 		{
-			.type = "sil164",
-			.addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38),
-			.platform_data = &(struct sil164_encoder_params) {
-				SIL164_INPUT_EDGE_RISING
-			}
+			{
+				.type = "sil164",
+				.addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38),
+				.platform_data = &(struct sil164_encoder_params) {
+					SIL164_INPUT_EDGE_RISING
+				}
+			}, 0
 		},
 		{ }
 	};
@@ -646,7 +648,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
 		return;
 
 	drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
-			     &port->adapter, &info[type]);
+			     &port->adapter, &info[type].dev);
 }
 
 static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4908d3fd0486..b13ff0fc42de 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -140,6 +140,8 @@ nv04_display_create(struct drm_device *dev)
 		func->save(encoder);
 	}
 
+	nouveau_overlay_init(dev);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 9928187f0a7d..56a28db04000 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -123,11 +123,14 @@ int nv04_tv_create(struct drm_connector *, struct dcb_output *);
 /* nv17_tv.c */
 int nv17_tv_create(struct drm_connector *, struct dcb_output *);
 
+/* overlay.c */
+void nouveau_overlay_init(struct drm_device *dev);
+
 static inline bool
 nv_two_heads(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	const int impl = dev->pci_device & 0x0ff0;
+	const int impl = dev->pdev->device & 0x0ff0;
 
 	if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
 	    impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
@@ -139,14 +142,14 @@ nv_two_heads(struct drm_device *dev)
 static inline bool
 nv_gf4_disp_arch(struct drm_device *dev)
 {
-	return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
+	return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110;
 }
 
 static inline bool
 nv_two_reg_pll(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	const int impl = dev->pci_device & 0x0ff0;
+	const int impl = dev->pdev->device & 0x0ff0;
 
 	if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
 		return true;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 973056b86207..aca76af115b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -27,6 +27,7 @@
 #include "hw.h"
 
 #include <subdev/bios/pll.h>
+#include <subdev/fb.h>
 #include <subdev/clock.h>
 #include <subdev/timer.h>
 
@@ -220,7 +221,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
 	int ret;
 
 	if (plltype == PLL_MEMORY &&
-	    (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
+	    (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
 		uint32_t mpllP;
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
@@ -230,7 +231,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
 		return 400000 / mpllP;
 	} else
 	if (plltype == PLL_MEMORY &&
-	    (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
+	    (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
 		uint32_t clock;
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
@@ -664,6 +665,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_device *device = nv_device(drm->device);
 	struct nouveau_timer *ptimer = nouveau_timer(device);
+	struct nouveau_fb *pfb = nouveau_fb(device);
 	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
 	uint32_t reg900;
 	int i;
@@ -680,10 +682,10 @@ nv_load_state_ext(struct drm_device *dev, int head,
 	nv_wr32(device, NV_PVIDEO_INTR_EN, 0);
 	nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
 	nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
-	nv_wr32(device, NV_PVIDEO_LIMIT(0), 0); //drm->fb_available_size - 1);
-	nv_wr32(device, NV_PVIDEO_LIMIT(1), 0); //drm->fb_available_size - 1);
-	nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), 0); //drm->fb_available_size - 1);
-	nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), 0); //drm->fb_available_size - 1);
+	nv_wr32(device, NV_PVIDEO_LIMIT(0), pfb->ram->size - 1);
+	nv_wr32(device, NV_PVIDEO_LIMIT(1), pfb->ram->size - 1);
+	nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), pfb->ram->size - 1);
+	nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), pfb->ram->size - 1);
 	nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
 
 	NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
@@ -740,7 +742,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
 	}
 	/* NV11 and NV20 stop at 0x52. */
 	if (nv_gf4_disp_arch(dev)) {
-		if (nv_device(drm->device)->card_type == NV_10) {
+		if (nv_device(drm->device)->card_type < NV_20) {
 			/* Not waiting for vertical retrace before modifying
 			   CRE_53/CRE_54 causes lockups. */
 			nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
new file mode 100644
index 000000000000..3618ac6b6316
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Implementation based on the pre-KMS implementation in xf86-video-nouveau,
+ * written by Arthur Huillet.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+
+#include "nouveau_drm.h"
+
+#include "nouveau_bo.h"
+#include "nouveau_connector.h"
+#include "nouveau_display.h"
+#include "nvreg.h"
+
+
+struct nouveau_plane {
+	struct drm_plane base;
+	bool flip;
+	struct nouveau_bo *cur;
+
+	struct {
+		struct drm_property *colorkey;
+		struct drm_property *contrast;
+		struct drm_property *brightness;
+		struct drm_property *hue;
+		struct drm_property *saturation;
+		struct drm_property *iturbt_709;
+	} props;
+
+	int colorkey;
+	int contrast;
+	int brightness;
+	int hue;
+	int saturation;
+	int iturbt_709;
+};
+
+static uint32_t formats[] = {
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_UYVY,
+};
+
+/* Sine can be approximated with
+ * http://en.wikipedia.org/wiki/Bhaskara_I's_sine_approximation_formula
+ * sin(x degrees) ~= 4 x (180 - x) / (40500 - x (180 - x) )
+ * Note that this only works for the range [0, 180].
+ * Also note that sin(x) == -sin(x - 180)
+ */
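+/* For example, x = 30: 4 * 30 * (180 - 30) / (40500 - 30 * (180 - 30))
+ * = 18000 / 36000 = 1/2, which is sin(30 degrees) exactly.
+ */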
+static inline int
+sin_mul(int degrees, int factor)
+{
+	if (degrees > 180) {
+		degrees -= 180;
+		factor *= -1;
+	}
+	return factor * 4 * degrees * (180 - degrees) /
+		(40500 - degrees * (180 - degrees));
+}
+
+/* cos(x) = sin(x + 90) */
+static inline int
+cos_mul(int degrees, int factor)
+{
+	return sin_mul((degrees + 90) % 360, factor);
+}
+
+static int
+nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+		  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+		  unsigned int crtc_w, unsigned int crtc_h,
+		  uint32_t src_x, uint32_t src_y,
+		  uint32_t src_w, uint32_t src_h)
+{
+	struct nouveau_device *dev = nouveau_dev(plane->dev);
+	struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nouveau_bo *cur = nv_plane->cur;
+	bool flip = nv_plane->flip;
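+	/* the low word of NV_PVIDEO_FORMAT appears to hold the line pitch
+	 * (hence the 0xffff limit below); src_w * 4 aligned to 256 bytes is
+	 * a conservative bound for either supported format */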
+	int format = ALIGN(src_w * 4, 0x100);
+	int soff = NV_PCRTC0_SIZE * nv_crtc->index;
+	int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
+	int ret;
+
+	if (format > 0xffff)
+		return -EINVAL;
+
+	ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return ret;
+
+	nv_plane->cur = nv_fb->nvbo;
+
+	/* Source parameters given in 16.16 fixed point, ignore fractional. */
+	src_x = src_x >> 16;
+	src_y = src_y >> 16;
+	src_w = src_w >> 16;
+	src_h = src_h >> 16;
+
+	nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
+	nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
+
+	nv_wr32(dev, NV_PVIDEO_BASE(flip), 0);
+	nv_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
+	nv_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
+	nv_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
+	nv_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
+	nv_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h);
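+	/* DS_DX/DT_DY are the horizontal/vertical scaling steps as a
+	 * fixed-point ratio with 20 fractional bits: source size over
+	 * displayed size */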
+	nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
+	nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
+
+	if (fb->pixel_format == DRM_FORMAT_NV12) {
+		format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
+		format |= NV_PVIDEO_FORMAT_PLANAR;
+	}
+	if (nv_plane->iturbt_709)
+		format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
+	if (nv_plane->colorkey & (1 << 24))
+		format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
+
+	if (fb->pixel_format == DRM_FORMAT_NV12) {
+		nv_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
+		nv_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
+			nv_fb->nvbo->bo.offset + fb->offsets[1]);
+	}
+	nv_wr32(dev, NV_PVIDEO_FORMAT(flip), format);
+	nv_wr32(dev, NV_PVIDEO_STOP, 0);
+	/* TODO: wait for vblank? */
+	nv_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1);
+	nv_plane->flip = !flip;
+
+	if (cur)
+		nouveau_bo_unpin(cur);
+
+	return 0;
+}
+
+static int
+nv10_disable_plane(struct drm_plane *plane)
+{
+	struct nouveau_device *dev = nouveau_dev(plane->dev);
+	struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+
+	nv_wr32(dev, NV_PVIDEO_STOP, 1);
+	if (nv_plane->cur) {
+		nouveau_bo_unpin(nv_plane->cur);
+		nv_plane->cur = NULL;
+	}
+
+	return 0;
+}
+
+static void
+nv10_destroy_plane(struct drm_plane *plane)
+{
+	nv10_disable_plane(plane);
+	drm_plane_cleanup(plane);
+	kfree(plane);
+}
+
+static void
+nv10_set_params(struct nouveau_plane *plane)
+{
+	struct nouveau_device *dev = nouveau_dev(plane->base.dev);
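+	/* luma: signed brightness offset (centred on 512) in the high word,
+	 * contrast gain in the low word; chroma: sin/cos of the hue angle
+	 * scaled by saturation, i.e. the two coefficients of a hue rotation */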
+	u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
+	u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
+		(cos_mul(plane->hue, plane->saturation) & 0xffff);
+	u32 format = 0;
+
+	nv_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma);
+	nv_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma);
+	nv_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma);
+	nv_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma);
+	nv_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff);
+
+	if (plane->cur) {
+		if (plane->iturbt_709)
+			format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
+		if (plane->colorkey & (1 << 24))
+			format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
+		nv_mask(dev, NV_PVIDEO_FORMAT(plane->flip),
+			NV_PVIDEO_FORMAT_MATRIX_ITURBT709 |
+			NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY,
+			format);
+	}
+}
+
+static int
+nv10_set_property(struct drm_plane *plane,
+		  struct drm_property *property,
+		  uint64_t value)
+{
+	struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+
+	if (property == nv_plane->props.colorkey)
+		nv_plane->colorkey = value;
+	else if (property == nv_plane->props.contrast)
+		nv_plane->contrast = value;
+	else if (property == nv_plane->props.brightness)
+		nv_plane->brightness = value;
+	else if (property == nv_plane->props.hue)
+		nv_plane->hue = value;
+	else if (property == nv_plane->props.saturation)
+		nv_plane->saturation = value;
+	else if (property == nv_plane->props.iturbt_709)
+		nv_plane->iturbt_709 = value;
+	else
+		return -EINVAL;
+
+	nv10_set_params(nv_plane);
+	return 0;
+}
+
+static const struct drm_plane_funcs nv10_plane_funcs = {
+	.update_plane = nv10_update_plane,
+	.disable_plane = nv10_disable_plane,
+	.set_property = nv10_set_property,
+	.destroy = nv10_destroy_plane,
+};
+
+static void
+nv10_overlay_init(struct drm_device *device)
+{
+	struct nouveau_device *dev = nouveau_dev(device);
+	struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
+	int ret;
+
+	if (!plane)
+		return;
+
+	ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */,
+			     &nv10_plane_funcs,
+			     formats, ARRAY_SIZE(formats), false);
+	if (ret)
+		goto err;
+
+	/* Set up the plane properties */
+	plane->props.colorkey = drm_property_create_range(
+			device, 0, "colorkey", 0, 0x01ffffff);
+	plane->props.contrast = drm_property_create_range(
+			device, 0, "contrast", 0, 8192 - 1);
+	plane->props.brightness = drm_property_create_range(
+			device, 0, "brightness", 0, 1024);
+	plane->props.hue = drm_property_create_range(
+			device, 0, "hue", 0, 359);
+	plane->props.saturation = drm_property_create_range(
+			device, 0, "saturation", 0, 8192 - 1);
+	plane->props.iturbt_709 = drm_property_create_range(
+			device, 0, "iturbt_709", 0, 1);
+	if (!plane->props.colorkey ||
+	    !plane->props.contrast ||
+	    !plane->props.brightness ||
+	    !plane->props.hue ||
+	    !plane->props.saturation ||
+	    !plane->props.iturbt_709)
+		goto cleanup;
+
+	plane->colorkey = 0;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.colorkey, plane->colorkey);
+
+	plane->contrast = 0x1000;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.contrast, plane->contrast);
+
+	plane->brightness = 512;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.brightness, plane->brightness);
+
+	plane->hue = 0;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.hue, plane->hue);
+
+	plane->saturation = 0x1000;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.saturation, plane->saturation);
+
+	plane->iturbt_709 = 0;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.iturbt_709, plane->iturbt_709);
+
+	nv10_set_params(plane);
+	nv_wr32(dev, NV_PVIDEO_STOP, 1);
+	return;
+cleanup:
+	drm_plane_cleanup(&plane->base);
+err:
+	kfree(plane);
+	nv_error(dev, "Failed to create plane\n");
+}
+
+void
+nouveau_overlay_init(struct drm_device *device)
+{
+	struct nouveau_device *dev = nouveau_dev(device);
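+	/* this PVIDEO overlay implementation is only hooked up for
+	 * NV10..NV40 chipsets */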
+	if (dev->chipset >= 0x10 && dev->chipset <= 0x40)
+		nv10_overlay_init(device);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index bf13db4e8631..cc4b208ce546 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -37,15 +37,18 @@
 
 #include <subdev/i2c.h>
 
-static struct i2c_board_info nv04_tv_encoder_info[] = {
+static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = {
 	{
-		I2C_BOARD_INFO("ch7006", 0x75),
-		.platform_data = &(struct ch7006_encoder_params) {
-			CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
-			0, 0, 0,
-			CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
-			CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
-		}
+		{
+			I2C_BOARD_INFO("ch7006", 0x75),
+			.platform_data = &(struct ch7006_encoder_params) {
+				CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
+				0, 0, 0,
+				CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
+				CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
+			}
+		},
+		0
 	},
 	{ }
 };
@@ -229,7 +232,8 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
 
 	/* Run the slave-specific initialization */
 	ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
-				   &port->adapter, &nv04_tv_encoder_info[type]);
+				   &port->adapter,
+				   &nv04_tv_encoder_info[type].dev);
 	if (ret < 0)
 		goto fail_cleanup;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 8f467e7bfd19..6828d81ed7b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -87,6 +87,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm)
 	case NV_04:
 		return 0x006e;
 	case NV_10:
+	case NV_11:
 	case NV_20:
 	case NV_30:
 	case NV_40:
@@ -130,7 +131,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
 	if (chan->ntfy) {
 		nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
 		nouveau_bo_unpin(chan->ntfy);
-		drm_gem_object_unreference_unlocked(chan->ntfy->gem);
+		drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
 	}
 
 	if (chan->heap.block_size)
@@ -178,10 +179,10 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		getparam->value = device->chipset;
 		break;
 	case NOUVEAU_GETPARAM_PCI_VENDOR:
-		getparam->value = dev->pci_vendor;
+		getparam->value = dev->pdev->vendor;
 		break;
 	case NOUVEAU_GETPARAM_PCI_DEVICE:
-		getparam->value = dev->pci_device;
+		getparam->value = dev->pdev->device;
 		break;
 	case NOUVEAU_GETPARAM_BUS_TYPE:
 		if (drm_pci_device_is_agp(dev))
@@ -297,7 +298,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 	else
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
 
-	if (device->card_type < NV_C0) {
+	if (device->card_type < NV_10) {
 		init->subchan[0].handle = 0x00000000;
 		init->subchan[0].grclass = 0x0000;
 		init->subchan[1].handle = NvSw;
@@ -320,7 +321,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 		goto done;
 	}
 
-	ret = drm_gem_handle_create(file_priv, chan->ntfy->gem,
+	ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
 				    &init->notifier_handle);
 	if (ret)
 		goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index cfbeee607b3a..07273a2ae62f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -314,6 +314,16 @@ static bool nouveau_dsm_detect(void)
 		has_optimus = 1;
 	}
 
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pdev)) != NULL) {
+		vga_count++;
+
+		retval = nouveau_dsm_pci_probe(pdev);
+		if (retval & NOUVEAU_DSM_HAS_MUX)
+			has_dsm |= 1;
+		if (retval & NOUVEAU_DSM_HAS_OPT)
+			has_optimus = 1;
+	}
+
 	/* find the optimus DSM or the old v1 DSM */
 	if (has_optimus == 1) {
 		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index 6e7a55f93a85..2953c4e91e1a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -11,10 +11,28 @@ MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
 static int nouveau_agpmode = -1;
 module_param_named(agpmode, nouveau_agpmode, int, 0400);
 
+struct nouveau_agpmode_quirk {
+	u16 hostbridge_vendor;
+	u16 hostbridge_device;
+	u16 chip_vendor;
+	u16 chip_device;
+	int mode;
+};
+
+static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = {
+	/* VIA Apollo PRO133x / GeForce FX 5600 Ultra, max agpmode 2, fdo #20341 */
+	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
+
+	{},
+};
+
 static unsigned long
-get_agp_mode(struct nouveau_drm *drm, unsigned long mode)
+get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
 {
 	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
+	int agpmode = nouveau_agpmode;
+	unsigned long mode = info->mode;
 
 	/*
 	 * FW seems to be broken on nv18, it makes the card lock up
@@ -24,11 +42,27 @@ get_agp_mode(struct nouveau_drm *drm, unsigned long mode)
 		mode &= ~PCI_AGP_COMMAND_FW;
 
 	/*
+	 * Go through the quirks list and adjust the agpmode accordingly.
+	 */
+	while (agpmode == -1 && quirk->hostbridge_vendor) {
+		if (info->id_vendor == quirk->hostbridge_vendor &&
+		    info->id_device == quirk->hostbridge_device &&
+		    device->pdev->vendor == quirk->chip_vendor &&
+		    device->pdev->device == quirk->chip_device) {
+			agpmode = quirk->mode;
+			nv_info(device, "Forcing agp mode to %dX. Use agpmode to override.\n",
+				agpmode);
+			break;
+		}
+		++quirk;
+	}
+
+	/*
 	 * AGP mode set in the command line.
 	 */
-	if (nouveau_agpmode > 0) {
+	if (agpmode > 0) {
 		bool agpv3 = mode & 0x8;
-		int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
+		int rate = agpv3 ? agpmode / 4 : agpmode;
 
 		mode = (mode & ~0x7) | (rate & 0x7);
 	}
@@ -90,7 +124,7 @@ nouveau_agp_reset(struct nouveau_drm *drm)
 	if (ret)
 		return;
 
-	mode.mode = get_agp_mode(drm, info.mode);
+	mode.mode = get_agp_mode(drm, &info);
 	mode.mode &= ~PCI_AGP_COMMAND_FW;
 
 	ret = drm_agp_enable(dev, mode);
@@ -139,7 +173,7 @@ nouveau_agp_init(struct nouveau_drm *drm)
 	}
 
 	/* see agp.h for the AGPSTAT_* modes available */
-	mode.mode = get_agp_mode(drm, info.mode);
+	mode.mode = get_agp_mode(drm, &info);
 
 	ret = drm_agp_enable(dev, mode);
 	if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 2ffad2176b7f..630f6e84fc01 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -82,7 +82,7 @@ nv40_backlight_init(struct drm_connector *connector)
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = 31;
-	bd = backlight_device_register("nv_backlight", &connector->kdev, drm,
+	bd = backlight_device_register("nv_backlight", connector->kdev, drm,
 				       &nv40_bl_ops, &props);
 	if (IS_ERR(bd))
 		return PTR_ERR(bd);
@@ -204,7 +204,7 @@ nv50_backlight_init(struct drm_connector *connector)
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = 100;
-	bd = backlight_device_register("nv_backlight", &connector->kdev,
+	bd = backlight_device_register("nv_backlight", connector->kdev,
 				       nv_encoder, ops, &props);
 	if (IS_ERR(bd))
 		return PTR_ERR(bd);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 3e7287675ecf..4c3feaaa1037 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -127,8 +127,8 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
 #ifdef __powerpc__
 	/* Powerbook specific quirks */
 	if (script == LVDS_RESET &&
-	    (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
-	     dev->pci_device == 0x0329))
+	    (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 ||
+	     dev->pdev->device == 0x0329))
 		nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
 #endif
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 755c38d06271..c0fde6b9393c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -98,12 +98,7 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
 
 	if (tile) {
 		spin_lock(&drm->tile.lock);
-		if (fence) {
-			/* Mark it as pending. */
-			tile->fence = fence;
-			nouveau_fence_ref(fence);
-		}
-
+		tile->fence = nouveau_fence_ref(fence);
 		tile->used = false;
 		spin_unlock(&drm->tile.lock);
 	}
@@ -146,7 +141,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	struct drm_device *dev = drm->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	if (unlikely(nvbo->gem))
+	if (unlikely(nvbo->gem.filp))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 	WARN_ON(nvbo->pin_refcnt > 0);
 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
@@ -269,7 +264,8 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 	struct nouveau_fb *pfb = nouveau_fb(drm->device);
 	u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;
 
-	if (nv_device(drm->device)->card_type == NV_10 &&
+	if ((nv_device(drm->device)->card_type == NV_10 ||
+	     nv_device(drm->device)->card_type == NV_11) &&
 	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
@@ -982,7 +978,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
-	struct nouveau_channel *chan = chan = drm->ttm.chan;
+	struct nouveau_channel *chan = drm->ttm.chan;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;
@@ -1267,7 +1263,7 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp);
+	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
 }
 
 static int
@@ -1461,14 +1457,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 {
+	struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
 	struct nouveau_fence *old_fence = NULL;
 
-	if (likely(fence))
-		nouveau_fence_ref(fence);
-
 	spin_lock(&nvbo->bo.bdev->fence_lock);
 	old_fence = nvbo->bo.sync_obj;
-	nvbo->bo.sync_obj = fence;
+	nvbo->bo.sync_obj = new_fence;
 	spin_unlock(&nvbo->bo.bdev->fence_lock);
 
 	nouveau_fence_unref(&old_fence);
@@ -1551,7 +1545,8 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
 
 	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
 		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
-	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
+	else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+		 nvbo->page_shift == vma->vm->vmm->spg_shift) {
 		if (node->sg)
 			nouveau_vm_map_sg_table(vma, 0, size, node);
 		else
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 653dbbbd4fa1..ff17c1f432fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -27,7 +27,10 @@ struct nouveau_bo {
 	u32 tile_flags;
 	struct nouveau_drm_tile *tile;
 
-	struct drm_gem_object *gem;
+	/* Only valid if allocated via nouveau_gem_new() and iff you hold a
+	 * gem reference to it! For debugging, use gem.filp != NULL to test
+	 * whether it is valid. */
+	struct drm_gem_object gem;
 
 	/* protect by the ttm reservation lock */
 	int pin_refcnt;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index e84f4c32331b..cc5152be2cf1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -346,22 +346,17 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
 		OUT_RING(chan, 0x00000000);
 
-	/* allocate software object class (used for fences on <= nv05, and
-	 * to signal flip completion), bind it to a subchannel.
-	 */
-	if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
+	/* allocate software object class (used for fences on <= nv05) */
+	if (device->card_type < NV_10) {
 		ret = nouveau_object_new(nv_object(client), chan->handle,
-					 NvSw, nouveau_abi16_swclass(chan->drm),
-					 NULL, 0, &object);
+					 NvSw, 0x006e, NULL, 0, &object);
 		if (ret)
 			return ret;
 
 		swch = (void *)object->parent;
 		swch->flip = nouveau_flip_complete;
 		swch->flip_data = chan;
-	}
 
-	if (device->card_type < NV_C0) {
 		ret = RING_SPACE(chan, 2);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index c5b36f9e9a10..1674882d60d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -100,6 +100,7 @@ static void
 nouveau_connector_destroy(struct drm_connector *connector)
 {
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	nouveau_event_ref(NULL, &nv_connector->hpd_func);
 	kfree(nv_connector->edid);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
@@ -214,9 +215,10 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
 	} else {
 		connector->doublescan_allowed = true;
 		if (nv_device(drm->device)->card_type == NV_20 ||
-		    (nv_device(drm->device)->card_type == NV_10 &&
-		     (dev->pci_device & 0x0ff0) != 0x0100 &&
-		     (dev->pci_device & 0x0ff0) != 0x0150))
+		    ((nv_device(drm->device)->card_type == NV_10 ||
+		      nv_device(drm->device)->card_type == NV_11) &&
+		     (dev->pdev->device & 0x0ff0) != 0x0100 &&
+		     (dev->pdev->device & 0x0ff0) != 0x0150))
 			/* HW is broken */
 			connector->interlace_allowed = false;
 		else
@@ -932,10 +934,9 @@ nouveau_connector_hotplug_work(struct work_struct *work)
 }
 
 static int
-nouveau_connector_hotplug(struct nouveau_eventh *event, int index)
+nouveau_connector_hotplug(void *data, int index)
 {
-	struct nouveau_connector *nv_connector =
-		container_of(event, struct nouveau_connector, hpd_func);
+	struct nouveau_connector *nv_connector = data;
 	schedule_work(&nv_connector->hpd_work);
 	return NVKM_EVENT_KEEP;
 }
@@ -1007,10 +1008,16 @@ nouveau_connector_create(struct drm_device *dev, int index)
 
 	ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)],
 			 DCB_GPIO_UNUSED, &nv_connector->hpd);
-	nv_connector->hpd_func.func = nouveau_connector_hotplug;
 	if (ret)
 		nv_connector->hpd.func = DCB_GPIO_UNUSED;
 
+	if (nv_connector->hpd.func != DCB_GPIO_UNUSED) {
+		nouveau_event_new(gpio->events, nv_connector->hpd.line,
+				  nouveau_connector_hotplug,
+				  nv_connector,
+				  &nv_connector->hpd_func);
+	}
+
 	nv_connector->type = nv_connector->dcb[0];
 	if (drm_conntype_from_dcb(nv_connector->type) ==
 			    DRM_MODE_CONNECTOR_Unknown) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 6e399aad491a..264a778f473b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -69,7 +69,7 @@ struct nouveau_connector {
69 69
70 struct dcb_gpio_func hpd; 70 struct dcb_gpio_func hpd;
71 struct work_struct hpd_work; 71 struct work_struct hpd_work;
72 struct nouveau_eventh hpd_func; 72 struct nouveau_eventh *hpd_func;
73 73
74 int dithering_mode; 74 int dithering_mode;
75 int dithering_depth; 75 int dithering_depth;
@@ -107,7 +107,4 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
 struct drm_connector *
 nouveau_connector_create(struct drm_device *, int index);
 
-int
-nouveau_connector_bpp(struct drm_connector *);
-
 #endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7848590f5568..7809d92183c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -26,7 +26,6 @@
26 26
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h> 28#include <drm/drm_crtc_helper.h>
29#include <drm/ttm/ttm_execbuf_util.h>
30 29
31#include "nouveau_fbcon.h" 30#include "nouveau_fbcon.h"
32#include "dispnv04/hw.h" 31#include "dispnv04/hw.h"
@@ -38,19 +37,92 @@
 
 #include "nouveau_fence.h"
 
-#include <subdev/bios/gpio.h>
-#include <subdev/gpio.h>
 #include <engine/disp.h>
 
 #include <core/class.h>
 
+static int
+nouveau_display_vblank_handler(void *data, int head)
+{
+	struct nouveau_drm *drm = data;
+	drm_handle_vblank(drm->dev, head);
+	return NVKM_EVENT_KEEP;
+}
+
+int
+nouveau_display_vblank_enable(struct drm_device *dev, int head)
+{
+	struct nouveau_display *disp = nouveau_display(dev);
+	if (disp) {
+		nouveau_event_get(disp->vblank[head]);
+		return 0;
+	}
+	return -EIO;
+}
+
+void
+nouveau_display_vblank_disable(struct drm_device *dev, int head)
+{
+	struct nouveau_display *disp = nouveau_display(dev);
+	if (disp)
+		nouveau_event_put(disp->vblank[head]);
+}
+
+static void
+nouveau_display_vblank_fini(struct drm_device *dev)
+{
+	struct nouveau_display *disp = nouveau_display(dev);
+	int i;
+
+	if (disp->vblank) {
+		for (i = 0; i < dev->mode_config.num_crtc; i++)
+			nouveau_event_ref(NULL, &disp->vblank[i]);
+		kfree(disp->vblank);
+		disp->vblank = NULL;
+	}
+
+	drm_vblank_cleanup(dev);
+}
+
+static int
+nouveau_display_vblank_init(struct drm_device *dev)
+{
+	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
+	int ret, i;
+
+	disp->vblank = kzalloc(dev->mode_config.num_crtc *
+			       sizeof(*disp->vblank), GFP_KERNEL);
+	if (!disp->vblank)
+		return -ENOMEM;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		ret = nouveau_event_new(pdisp->vblank, i,
+					nouveau_display_vblank_handler,
+					drm, &disp->vblank[i]);
+		if (ret) {
+			nouveau_display_vblank_fini(dev);
+			return ret;
+		}
+	}
+
+	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+	if (ret) {
+		nouveau_display_vblank_fini(dev);
+		return ret;
+	}
+
+	return 0;
+}
+
 static void
 nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
 {
 	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
 
 	if (fb->nvbo)
-		drm_gem_object_unreference_unlocked(fb->nvbo->gem);
+		drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
 
 	drm_framebuffer_cleanup(drm_fb);
 	kfree(fb);
@@ -63,7 +135,7 @@ nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
 {
 	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
 
-	return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
+	return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle);
 }
 
 static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
@@ -227,9 +299,7 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
 int
 nouveau_display_init(struct drm_device *dev)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_display *disp = nouveau_display(dev);
-	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	struct drm_connector *connector;
 	int ret;
 
@@ -243,10 +313,7 @@ nouveau_display_init(struct drm_device *dev)
 	/* enable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
-		if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
-			nouveau_event_get(gpio->events, conn->hpd.line,
-					  &conn->hpd_func);
-		}
+		if (conn->hpd_func) nouveau_event_get(conn->hpd_func);
 	}
 
 	return ret;
@@ -255,18 +322,13 @@ nouveau_display_init(struct drm_device *dev)
 void
 nouveau_display_fini(struct drm_device *dev)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_display *disp = nouveau_display(dev);
-	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	struct drm_connector *connector;
 
 	/* disable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
-		if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
-			nouveau_event_put(gpio->events, conn->hpd.line,
-					  &conn->hpd_func);
-		}
+		if (conn->hpd_func) nouveau_event_put(conn->hpd_func);
 	}
 
 	drm_kms_helper_poll_disable(dev);
@@ -336,6 +398,11 @@ nouveau_display_create(struct drm_device *dev)
 	dev->mode_config.preferred_depth = 24;
 	dev->mode_config.prefer_shadow = 1;
 
+	if (nv_device(drm->device)->chipset < 0x11)
+		dev->mode_config.async_page_flip = false;
+	else
+		dev->mode_config.async_page_flip = true;
+
 	drm_kms_helper_poll_init(dev);
 	drm_kms_helper_poll_disable(dev);
 
@@ -352,7 +419,7 @@ nouveau_display_create(struct drm_device *dev)
 		goto disp_create_err;
 
 	if (dev->mode_config.num_crtc) {
-		ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+		ret = nouveau_display_vblank_init(dev);
 		if (ret)
 			goto vblank_err;
 	}
@@ -374,7 +441,7 @@ nouveau_display_destroy(struct drm_device *dev)
 	struct nouveau_display *disp = nouveau_display(dev);
 
 	nouveau_backlight_exit(dev);
-	drm_vblank_cleanup(dev);
+	nouveau_display_vblank_fini(dev);
 
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
@@ -394,7 +461,7 @@ nouveau_display_suspend(struct drm_device *dev)
 
 	nouveau_display_fini(dev);
 
-	NV_SUSPEND(drm, "unpinning framebuffer(s)...\n");
+	NV_INFO(drm, "unpinning framebuffer(s)...\n");
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_framebuffer *nouveau_fb;
 
@@ -492,19 +559,15 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 		goto fail;
 
 	/* Emit the pageflip */
-	ret = RING_SPACE(chan, 3);
+	ret = RING_SPACE(chan, 2);
 	if (ret)
 		goto fail;
 
-	if (nv_device(drm->device)->card_type < NV_C0) {
+	if (nv_device(drm->device)->card_type < NV_C0)
 		BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
-		OUT_RING  (chan, 0x00000000);
-		OUT_RING  (chan, 0x00000000);
-	} else {
-		BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
-		OUT_RING  (chan, 0);
-		BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
-	}
+	else
+		BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
+	OUT_RING  (chan, 0x00000000);
 	FIRE_RING (chan);
 
 	ret = nouveau_fence_new(chan, false, pfence);
@@ -521,22 +584,16 @@ fail:
 
 int
 nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-		       struct drm_pending_vblank_event *event,
-		       uint32_t page_flip_flags)
+		       struct drm_pending_vblank_event *event, u32 flags)
 {
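+	/* DRM_MODE_PAGE_FLIP_ASYNC asks for a flip that does not wait for
+	 * vblank, hence swap interval 0 below; otherwise flip on the next
+	 * vertical refresh as usual */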
+	const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
 	struct drm_device *dev = crtc->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
 	struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
 	struct nouveau_page_flip_state *s;
-	struct nouveau_channel *chan = NULL;
+	struct nouveau_channel *chan = drm->channel;
 	struct nouveau_fence *fence;
-	struct ttm_validate_buffer resv[2] = {
-		{ .bo = &old_bo->bo },
-		{ .bo = &new_bo->bo },
-	};
-	struct ww_acquire_ctx ticket;
-	LIST_HEAD(res);
 	int ret;
 
 	if (!drm->channel)
@@ -546,26 +603,22 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	if (!s)
 		return -ENOMEM;
 
-	/* Choose the channel the flip will be handled in */
-	spin_lock(&old_bo->bo.bdev->fence_lock);
-	fence = new_bo->bo.sync_obj;
-	if (fence)
-		chan = fence->channel;
-	if (!chan)
-		chan = drm->channel;
-	spin_unlock(&old_bo->bo.bdev->fence_lock);
+	/* synchronise rendering channel with the kernel's channel */
+	spin_lock(&new_bo->bo.bdev->fence_lock);
+	fence = nouveau_fence_ref(new_bo->bo.sync_obj);
+	spin_unlock(&new_bo->bo.bdev->fence_lock);
+	ret = nouveau_fence_sync(fence, chan);
+	if (ret)
+		return ret;
 
 	if (new_bo != old_bo) {
 		ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
 		if (ret)
 			goto fail_free;
-
-		list_add(&resv[1].head, &res);
 	}
-	list_add(&resv[0].head, &res);
 
 	mutex_lock(&chan->cli->mutex);
-	ret = ttm_eu_reserve_buffers(&ticket, &res);
+	ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
 	if (ret)
 		goto fail_unpin;
 
@@ -577,12 +630,29 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
 	/* Emit a page flip */
 	if (nv_device(drm->device)->card_type >= NV_50) {
-		ret = nv50_display_flip_next(crtc, fb, chan, 0);
+		ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
 		if (ret)
 			goto fail_unreserve;
 	} else {
 		struct nv04_display *dispnv04 = nv04_display(dev);
-		nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
+		int head = nouveau_crtc(crtc)->index;
+
+		if (swap_interval) {
+			ret = RING_SPACE(chan, 8);
+			if (ret)
+				goto fail_unreserve;
+
+			BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
+			OUT_RING  (chan, 0);
+			BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
+			OUT_RING  (chan, head);
+			BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
+			OUT_RING  (chan, 0);
+			BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
+			OUT_RING  (chan, 0);
+		}
+
+		nouveau_bo_ref(new_bo, &dispnv04->image[head]);
 	}
 
 	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
@@ -593,14 +663,15 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	/* Update the crtc struct and cleanup */
 	crtc->fb = fb;
 
-	ttm_eu_fence_buffer_objects(&ticket, &res, fence);
+	nouveau_bo_fence(old_bo, fence);
+	ttm_bo_unreserve(&old_bo->bo);
 	if (old_bo != new_bo)
 		nouveau_bo_unpin(old_bo);
 	nouveau_fence_unref(&fence);
 	return 0;
 
 fail_unreserve:
-	ttm_eu_backoff_reservation(&ticket, &res);
+	ttm_bo_unreserve(&old_bo->bo);
 fail_unpin:
 	mutex_unlock(&chan->cli->mutex);
 	if (old_bo != new_bo)
@@ -674,8 +745,8 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
-	drm_gem_object_unreference_unlocked(bo->gem);
+	ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
+	drm_gem_object_unreference_unlocked(&bo->gem);
 	return ret;
 }
 
681 752
@@ -688,7 +759,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
688 759
689 gem = drm_gem_object_lookup(dev, file_priv, handle); 760 gem = drm_gem_object_lookup(dev, file_priv, handle);
690 if (gem) { 761 if (gem) {
691 struct nouveau_bo *bo = gem->driver_private; 762 struct nouveau_bo *bo = nouveau_gem_object(gem);
692 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node); 763 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
693 drm_gem_object_unreference_unlocked(gem); 764 drm_gem_object_unreference_unlocked(gem);
694 return 0; 765 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 025c66f8e0ed..8bc8bab90e8d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -36,6 +36,8 @@ struct nouveau_display {
36 int (*init)(struct drm_device *); 36 int (*init)(struct drm_device *);
37 void (*fini)(struct drm_device *); 37 void (*fini)(struct drm_device *);
38 38
39 struct nouveau_eventh **vblank;
40
39 struct drm_property *dithering_mode; 41 struct drm_property *dithering_mode;
40 struct drm_property *dithering_depth; 42 struct drm_property *dithering_depth;
41 struct drm_property *underscan_property; 43 struct drm_property *underscan_property;
@@ -59,6 +61,8 @@ void nouveau_display_fini(struct drm_device *dev);
59int nouveau_display_suspend(struct drm_device *dev); 61int nouveau_display_suspend(struct drm_device *dev);
60void nouveau_display_repin(struct drm_device *dev); 62void nouveau_display_repin(struct drm_device *dev);
61void nouveau_display_resume(struct drm_device *dev); 63void nouveau_display_resume(struct drm_device *dev);
64int nouveau_display_vblank_enable(struct drm_device *, int);
65void nouveau_display_vblank_disable(struct drm_device *, int);
62 66
63int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, 67int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
64 struct drm_pending_vblank_event *event, 68 struct drm_pending_vblank_event *event,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 690d5930ce32..984004d66a6d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -51,9 +51,11 @@ enum {
 	NvSubCtxSurf2D = 0,
 	NvSubSw = 1,
 	NvSubImageBlit = 2,
-	NvSub2D = 3,
 	NvSubGdiRect = 3,
-	NvSubCopy = 4,
+
+	NvSub2D = 3, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
+	NvSubCopy = 4, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
+	FermiSw = 5, /* DO NOT CHANGE (well.. 6/7 will work...) */
 };
 
 /* Object handles. */
@@ -194,7 +196,6 @@ WIND_RING(struct nouveau_channel *chan)
 #define NV84_SUBCHAN_UEVENT 0x00000020
 #define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
 #define NV10_SUBCHAN_REF_CNT 0x00000050
-#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
 #define NV11_SUBCHAN_DMA_SEMAPHORE 0x00000060
 #define NV11_SUBCHAN_SEMAPHORE_OFFSET 0x00000064
 #define NV11_SUBCHAN_SEMAPHORE_ACQUIRE 0x00000068
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e893c5362402..7a3759f1c41a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -37,6 +37,7 @@
 #include <engine/device.h>
 #include <engine/disp.h>
 #include <engine/fifo.h>
+#include <engine/software.h>
 
 #include <subdev/vm.h>
 
@@ -46,7 +47,8 @@
 #include "nouveau_gem.h"
 #include "nouveau_agp.h"
 #include "nouveau_vga.h"
-#include "nouveau_pm.h"
+#include "nouveau_sysfs.h"
+#include "nouveau_hwmon.h"
 #include "nouveau_acpi.h"
 #include "nouveau_bios.h"
 #include "nouveau_ioctl.h"
@@ -78,41 +80,6 @@ module_param_named(runpm, nouveau_runtime_pm, int, 0400);
 
 static struct drm_driver driver;
 
-static int
-nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
-{
-	struct nouveau_drm *drm =
-		container_of(event, struct nouveau_drm, vblank[head]);
-	drm_handle_vblank(drm->dev, head);
-	return NVKM_EVENT_KEEP;
-}
-
-static int
-nouveau_drm_vblank_enable(struct drm_device *dev, int head)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
-
-	if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank)))
-		return -EIO;
-	WARN_ON_ONCE(drm->vblank[head].func);
-	drm->vblank[head].func = nouveau_drm_vblank_handler;
-	nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]);
-	return 0;
-}
-
-static void
-nouveau_drm_vblank_disable(struct drm_device *dev, int head)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
-	if (drm->vblank[head].func)
-		nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]);
-	else
-		WARN_ON_ONCE(1);
-	drm->vblank[head].func = NULL;
-}
-
 static u64
 nouveau_name(struct pci_dev *pdev)
 {
@@ -177,7 +144,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
 
 	/* initialise synchronisation routines */
 	if (device->card_type < NV_10) ret = nv04_fence_create(drm);
-	else if (device->chipset < 0x17) ret = nv10_fence_create(drm);
+	else if (device->card_type < NV_11 ||
+		 device->chipset < 0x17) ret = nv10_fence_create(drm);
 	else if (device->card_type < NV_50) ret = nv17_fence_create(drm);
 	else if (device->chipset < 0x84) ret = nv50_fence_create(drm);
 	else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
@@ -224,6 +192,32 @@ nouveau_accel_init(struct nouveau_drm *drm)
 		return;
 	}
 
+	ret = nouveau_object_new(nv_object(drm), NVDRM_CHAN, NVDRM_NVSW,
+				 nouveau_abi16_swclass(drm), NULL, 0, &object);
+	if (ret == 0) {
+		struct nouveau_software_chan *swch = (void *)object->parent;
+		ret = RING_SPACE(drm->channel, 2);
+		if (ret == 0) {
+			if (device->card_type < NV_C0) {
+				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
+				OUT_RING (drm->channel, NVDRM_NVSW);
+			} else
+			if (device->card_type < NV_E0) {
+				BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
+				OUT_RING (drm->channel, 0x001f0000);
+			}
+		}
+		swch = (void *)object->parent;
+		swch->flip = nouveau_flip_complete;
+		swch->flip_data = drm->channel;
+	}
+
+	if (ret) {
+		NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
+		nouveau_accel_fini(drm);
+		return;
+	}
+
 	if (device->card_type < NV_C0) {
 		ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
 					 &drm->notify);
@@ -418,8 +412,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 		goto fail_dispinit;
 	}
 
-	nouveau_pm_init(dev);
-
+	nouveau_sysfs_init(dev);
+	nouveau_hwmon_init(dev);
 	nouveau_accel_init(drm);
 	nouveau_fbcon_init(dev);
 
@@ -455,8 +449,8 @@ nouveau_drm_unload(struct drm_device *dev)
 	pm_runtime_get_sync(dev->dev);
 	nouveau_fbcon_fini(dev);
 	nouveau_accel_fini(drm);
-
-	nouveau_pm_fini(dev);
+	nouveau_hwmon_fini(dev);
+	nouveau_sysfs_fini(dev);
 
 	if (dev->mode_config.num_crtc)
 		nouveau_display_fini(dev);
@@ -496,16 +490,16 @@ nouveau_do_suspend(struct drm_device *dev)
 	int ret;
 
 	if (dev->mode_config.num_crtc) {
-		NV_SUSPEND(drm, "suspending display...\n");
+		NV_INFO(drm, "suspending display...\n");
 		ret = nouveau_display_suspend(dev);
 		if (ret)
 			return ret;
 	}
 
-	NV_SUSPEND(drm, "evicting buffers...\n");
+	NV_INFO(drm, "evicting buffers...\n");
 	ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
 
-	NV_SUSPEND(drm, "waiting for kernel channels to go idle...\n");
+	NV_INFO(drm, "waiting for kernel channels to go idle...\n");
 	if (drm->cechan) {
 		ret = nouveau_channel_idle(drm->cechan);
 		if (ret)
@@ -518,7 +512,7 @@ nouveau_do_suspend(struct drm_device *dev)
 			return ret;
 	}
 
-	NV_SUSPEND(drm, "suspending client object trees...\n");
+	NV_INFO(drm, "suspending client object trees...\n");
 	if (drm->fence && nouveau_fence(drm)->suspend) {
 		if (!nouveau_fence(drm)->suspend(drm))
 			return -ENOMEM;
@@ -530,7 +524,7 @@ nouveau_do_suspend(struct drm_device *dev)
 			goto fail_client;
 	}
 
-	NV_SUSPEND(drm, "suspending kernel object tree...\n");
+	NV_INFO(drm, "suspending kernel object tree...\n");
 	ret = nouveau_client_fini(&drm->client.base, true);
 	if (ret)
 		goto fail_client;
@@ -544,7 +538,7 @@ fail_client:
 	}
 
 	if (dev->mode_config.num_crtc) {
-		NV_SUSPEND(drm, "resuming display...\n");
+		NV_INFO(drm, "resuming display...\n");
 		nouveau_display_resume(dev);
 	}
 	return ret;
@@ -563,7 +557,6 @@ int nouveau_pmops_suspend(struct device *dev)
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_fbcon_set_suspend(drm_dev, 1);
 
-	nv_suspend_set_printk_level(NV_DBG_INFO);
 	ret = nouveau_do_suspend(drm_dev);
 	if (ret)
 		return ret;
@@ -571,8 +564,6 @@
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, PCI_D3hot);
-	nv_suspend_set_printk_level(NV_DBG_DEBUG);
-
 	return 0;
 }
 
@@ -582,15 +573,15 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli;
 
-	NV_SUSPEND(drm, "re-enabling device...\n");
+	NV_INFO(drm, "re-enabling device...\n");
 
 	nouveau_agp_reset(drm);
 
-	NV_SUSPEND(drm, "resuming kernel object tree...\n");
+	NV_INFO(drm, "resuming kernel object tree...\n");
 	nouveau_client_init(&drm->client.base);
 	nouveau_agp_init(drm);
 
-	NV_SUSPEND(drm, "resuming client object trees...\n");
+	NV_INFO(drm, "resuming client object trees...\n");
 	if (drm->fence && nouveau_fence(drm)->resume)
 		nouveau_fence(drm)->resume(drm);
 
@@ -599,10 +590,9 @@ nouveau_do_resume(struct drm_device *dev)
 	}
 
 	nouveau_run_vbios_init(dev);
-	nouveau_pm_resume(dev);
 
 	if (dev->mode_config.num_crtc) {
-		NV_SUSPEND(drm, "resuming display...\n");
+		NV_INFO(drm, "resuming display...\n");
 		nouveau_display_repin(dev);
 	}
 
@@ -626,19 +616,15 @@ int nouveau_pmops_resume(struct device *dev)
 		return ret;
 	pci_set_master(pdev);
 
-	nv_suspend_set_printk_level(NV_DBG_INFO);
 	ret = nouveau_do_resume(drm_dev);
-	if (ret) {
-		nv_suspend_set_printk_level(NV_DBG_DEBUG);
+	if (ret)
 		return ret;
-	}
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_fbcon_set_suspend(drm_dev, 0);
 
 	nouveau_fbcon_zfill_all(drm_dev);
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_display_resume(drm_dev);
-	nv_suspend_set_printk_level(NV_DBG_DEBUG);
 	return 0;
 }
 
@@ -648,12 +634,10 @@ static int nouveau_pmops_freeze(struct device *dev)
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	int ret;
 
-	nv_suspend_set_printk_level(NV_DBG_INFO);
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_fbcon_set_suspend(drm_dev, 1);
 
 	ret = nouveau_do_suspend(drm_dev);
-	nv_suspend_set_printk_level(NV_DBG_DEBUG);
 	return ret;
 }
 
@@ -663,18 +647,14 @@ static int nouveau_pmops_thaw(struct device *dev)
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	int ret;
 
-	nv_suspend_set_printk_level(NV_DBG_INFO);
 	ret = nouveau_do_resume(drm_dev);
-	if (ret) {
-		nv_suspend_set_printk_level(NV_DBG_DEBUG);
+	if (ret)
 		return ret;
-	}
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_fbcon_set_suspend(drm_dev, 0);
 	nouveau_fbcon_zfill_all(drm_dev);
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_display_resume(drm_dev);
-	nv_suspend_set_printk_level(NV_DBG_DEBUG);
 	return 0;
 }
 
@@ -816,8 +796,8 @@ driver = {
 #endif
 
 	.get_vblank_counter = drm_vblank_count,
-	.enable_vblank = nouveau_drm_vblank_enable,
-	.disable_vblank = nouveau_drm_vblank_disable,
+	.enable_vblank = nouveau_display_vblank_enable,
+	.disable_vblank = nouveau_display_vblank_disable,
 
 	.ioctls = nouveau_ioctls,
 	.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
@@ -834,7 +814,6 @@ driver = {
 	.gem_prime_vmap = nouveau_gem_prime_vmap,
 	.gem_prime_vunmap = nouveau_gem_prime_vunmap,
 
-	.gem_init_object = nouveau_gem_object_new,
 	.gem_free_object = nouveau_gem_object_del,
 	.gem_open_object = nouveau_gem_object_open,
 	.gem_close_object = nouveau_gem_object_close,
@@ -879,6 +858,7 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
 	if (nouveau_runtime_pm == 0)
 		return -EINVAL;
 
+	nv_debug_level(SILENT);
 	drm_kms_helper_poll_disable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
 	nouveau_switcheroo_optimus_dsm();
@@ -915,6 +895,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
 	nv_mask(device, 0x88488, (1 << 25), (1 << 25));
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+	nv_debug_level(NORMAL);
 	return ret;
 }
 
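The nouveau_accel_init() hunk above binds the kernel channel's software object to a fixed subchannel before pointing its flip callback at nouveau_flip_complete. Below is a condensed sketch of just the binding step, using only identifiers visible in this diff; bind_sw_object is a hypothetical helper, and the NV_E0-and-later case (which binds nothing) is omitted.

	/* Sketch: bind a software object to its subchannel.
	 * In the hunk above this is called with handle == NVDRM_NVSW. */
	static int bind_sw_object(struct nouveau_drm *drm, u32 handle)
	{
		struct nouveau_device *device = nv_device(drm->device);
		int ret;

		/* two words of ring space: one method header, one data word */
		ret = RING_SPACE(drm->channel, 2);
		if (ret)
			return ret;

		if (device->card_type < NV_C0) {
			/* pre-Fermi: bind the object by handle on NvSubSw */
			BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
			OUT_RING (drm->channel, handle);
		} else {
			/* Fermi: FermiSw is a fixed subchannel index (see the
			 * nouveau_dma.h hunk earlier in this diff) */
			BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
			OUT_RING (drm->channel, 0x001f0000);
		}
		return 0;
	}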
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 994fd6ec373b..4b0fb6c66be9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -51,10 +51,12 @@ struct nouveau_drm_tile {
 };
 
 enum nouveau_drm_handle {
 	NVDRM_CLIENT = 0xffffffff,
 	NVDRM_DEVICE = 0xdddddddd,
+	NVDRM_CONTROL = 0xdddddddc,
 	NVDRM_PUSH = 0xbbbb0000, /* |= client chid */
 	NVDRM_CHAN = 0xcccc0000, /* |= client chid */
+	NVDRM_NVSW = 0x55550000,
 };
 
 struct nouveau_cli {
@@ -127,10 +129,10 @@ struct nouveau_drm {
 	struct nvbios vbios;
 	struct nouveau_display *display;
 	struct backlight_device *backlight;
-	struct nouveau_eventh vblank[4];
 
 	/* power management */
-	struct nouveau_pm *pm;
+	struct nouveau_hwmon *hwmon;
+	struct nouveau_sysfs *sysfs;
 
 	/* display power reference */
 	bool have_disp_power_ref;
@@ -154,7 +156,6 @@ nouveau_dev(struct drm_device *dev)
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
 
-#define NV_SUSPEND(cli, fmt, args...) nv_suspend((cli), fmt, ##args)
 #define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
 #define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
 #define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index a86ecf65c164..7903e0ed3c75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -420,7 +420,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
 		nouveau_bo_unmap(nouveau_fb->nvbo);
 		nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
 		nouveau_bo_unpin(nouveau_fb->nvbo);
-		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
+		drm_gem_object_unreference_unlocked(&nouveau_fb->nvbo->gem);
 		nouveau_fb->nvbo = NULL;
 	}
 	drm_fb_helper_fini(&fbcon->helper);
@@ -503,34 +503,45 @@ nouveau_fbcon_fini(struct drm_device *dev)
 	drm->fbcon = NULL;
 }
 
-void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
+void
+nouveau_fbcon_save_disable_accel(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
-	drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+	if (drm->fbcon) {
+		drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
+		drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+	}
 }
 
-void nouveau_fbcon_restore_accel(struct drm_device *dev)
+void
+nouveau_fbcon_restore_accel(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
+	if (drm->fbcon) {
+		drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
+	}
 }
 
-void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+void
+nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	console_lock();
-	if (state == 0)
-		nouveau_fbcon_save_disable_accel(dev);
-	fb_set_suspend(drm->fbcon->helper.fbdev, state);
-	if (state == 1)
-		nouveau_fbcon_restore_accel(dev);
-	console_unlock();
+	if (drm->fbcon) {
+		console_lock();
+		if (state == 0)
+			nouveau_fbcon_save_disable_accel(dev);
+		fb_set_suspend(drm->fbcon->helper.fbdev, state);
+		if (state == 1)
+			nouveau_fbcon_restore_accel(dev);
+		console_unlock();
+	}
 }
 
-void nouveau_fbcon_zfill_all(struct drm_device *dev)
+void
+nouveau_fbcon_zfill_all(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	nouveau_fbcon_zfill(dev, drm->fbcon);
+	if (drm->fbcon) {
+		nouveau_fbcon_zfill(dev, drm->fbcon);
+	}
 }
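All four fbcon entry points above now carry the same guard, so devices where fbdev emulation was never created (drm->fbcon == NULL, e.g. headless boards) can pass through suspend/resume safely. A minimal sketch of the guard pattern, with a hypothetical helper name:

	/* Sketch: every public fbcon helper bails out when there is no fbdev. */
	void nouveau_fbcon_example(struct drm_device *dev)
	{
		struct nouveau_drm *drm = nouveau_drm(dev);

		/* nothing to do when fbdev emulation was never created */
		if (!drm->fbcon)
			return;

		/* ... touch drm->fbcon->helper.fbdev as the functions above do ... */
	}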
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index be3149932c2d..40cf52e6d6d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -165,17 +165,11 @@ nouveau_fence_done(struct nouveau_fence *fence)
 	return !fence->channel;
 }
 
-struct nouveau_fence_uevent {
-	struct nouveau_eventh handler;
-	struct nouveau_fence_priv *priv;
-};
-
 static int
-nouveau_fence_wait_uevent_handler(struct nouveau_eventh *event, int index)
+nouveau_fence_wait_uevent_handler(void *data, int index)
 {
-	struct nouveau_fence_uevent *uevent =
-		container_of(event, struct nouveau_fence_uevent, handler);
-	wake_up_all(&uevent->priv->waiting);
+	struct nouveau_fence_priv *priv = data;
+	wake_up_all(&priv->waiting);
 	return NVKM_EVENT_KEEP;
 }
 
@@ -186,13 +180,16 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
 	struct nouveau_channel *chan = fence->channel;
 	struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
 	struct nouveau_fence_priv *priv = chan->drm->fence;
-	struct nouveau_fence_uevent uevent = {
-		.handler.func = nouveau_fence_wait_uevent_handler,
-		.priv = priv,
-	};
+	struct nouveau_eventh *handler;
 	int ret = 0;
 
-	nouveau_event_get(pfifo->uevent, 0, &uevent.handler);
+	ret = nouveau_event_new(pfifo->uevent, 0,
+				nouveau_fence_wait_uevent_handler,
+				priv, &handler);
+	if (ret)
+		return ret;
+
+	nouveau_event_get(handler);
 
 	if (fence->timeout) {
 		unsigned long timeout = fence->timeout - jiffies;
@@ -224,7 +221,7 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
 		}
 	}
 
-	nouveau_event_put(pfifo->uevent, 0, &uevent.handler);
+	nouveau_event_ref(NULL, &handler);
 	if (unlikely(ret < 0))
 		return ret;
 
@@ -309,7 +306,8 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
 struct nouveau_fence *
 nouveau_fence_ref(struct nouveau_fence *fence)
 {
-	kref_get(&fence->kref);
+	if (fence)
+		kref_get(&fence->kref);
 	return fence;
 }
 
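The nouveau_fence_ref() hunk makes taking a fence reference NULL-tolerant, which is what lets the nouveau_gem.c hunks below drop their "if (nvbo->bo.sync_obj)" checks around it. The same idiom on a generic kref-counted object, as a self-contained sketch (the struct and helper names here are illustrative, not nouveau code):

	#include <linux/kref.h>

	struct counted {
		struct kref kref;
	};

	/* Accepts and returns NULL unchanged, so callers may write
	 * "ref = counted_get(maybe_null);" without their own NULL check. */
	static struct counted *counted_get(struct counted *c)
	{
		if (c)
			kref_get(&c->kref);
		return c;
	}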
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f32b71238c03..78a27f8ad7d9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -34,29 +34,20 @@
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
 
-int
-nouveau_gem_object_new(struct drm_gem_object *gem)
-{
-	return 0;
-}
-
 void
 nouveau_gem_object_del(struct drm_gem_object *gem)
 {
-	struct nouveau_bo *nvbo = gem->driver_private;
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct ttm_buffer_object *bo = &nvbo->bo;
 
-	if (!nvbo)
-		return;
-	nvbo->gem = NULL;
-
 	if (gem->import_attach)
 		drm_prime_gem_destroy(gem, nvbo->bo.sg);
 
-	ttm_bo_unref(&bo);
-
 	drm_gem_object_release(gem);
-	kfree(gem);
+
+	/* reset filp so nouveau_bo_del_ttm() can test for it */
+	gem->filp = NULL;
+	ttm_bo_unref(&bo);
 }
 
 int
@@ -115,8 +106,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 
 	if (mapped) {
 		spin_lock(&nvbo->bo.bdev->fence_lock);
-		if (nvbo->bo.sync_obj)
-			fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
 		spin_unlock(&nvbo->bo.bdev->fence_lock);
 	}
 
@@ -186,14 +176,15 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
 	if (nv_device(drm->device)->card_type >= NV_50)
 		nvbo->valid_domains &= domain;
 
-	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
-	if (!nvbo->gem) {
+	/* Initialize the embedded gem-object. We return a single gem-reference
+	 * to the caller, instead of a normal nouveau_bo ttm reference. */
+	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+	if (ret) {
 		nouveau_bo_ref(NULL, pnvbo);
 		return -ENOMEM;
 	}
 
-	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
-	nvbo->gem->driver_private = nvbo;
+	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
 	return 0;
 }
 
@@ -250,15 +241,15 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
 	if (ret == 0) {
-		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
+		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
 		if (ret)
 			drm_gem_handle_delete(file_priv, req->info.handle);
 	}
 
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(nvbo->gem);
+	drm_gem_object_unreference_unlocked(&nvbo->gem);
 	return ret;
 }
 
@@ -266,7 +257,7 @@ static int
 nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 		       uint32_t write_domains, uint32_t valid_domains)
 {
-	struct nouveau_bo *nvbo = gem->driver_private;
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct ttm_buffer_object *bo = &nvbo->bo;
 	uint32_t domains = valid_domains & nvbo->valid_domains &
 		(write_domains ? write_domains : read_domains);
@@ -317,7 +308,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
 	list_for_each_safe(entry, tmp, list) {
 		nvbo = list_entry(entry, struct nouveau_bo, entry);
 
-		nouveau_bo_fence(nvbo, fence);
+		if (likely(fence))
+			nouveau_bo_fence(nvbo, fence);
 
 		if (unlikely(nvbo->validate_mapped)) {
 			ttm_bo_kunmap(&nvbo->kmap);
@@ -327,7 +319,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
 		list_del(&nvbo->entry);
 		nvbo->reserved_by = NULL;
 		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
-		drm_gem_object_unreference_unlocked(nvbo->gem);
+		drm_gem_object_unreference_unlocked(&nvbo->gem);
 	}
 }
 
@@ -376,7 +368,7 @@ retry:
 			validate_fini(op, NULL);
 			return -ENOENT;
 		}
-		nvbo = gem->driver_private;
+		nvbo = nouveau_gem_object(gem);
 		if (nvbo == res_bo) {
 			res_bo = NULL;
 			drm_gem_object_unreference_unlocked(gem);
@@ -446,8 +438,7 @@ validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
 	int ret = 0;
 
 	spin_lock(&nvbo->bo.bdev->fence_lock);
-	if (nvbo->bo.sync_obj)
-		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+	fence = nouveau_fence_ref(nvbo->bo.sync_obj);
 	spin_unlock(&nvbo->bo.bdev->fence_lock);
 
 	if (fence) {
@@ -478,7 +469,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 			return ret;
 		}
 
-		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
+		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
 					     b->write_domains,
 					     b->valid_domains);
 		if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 502e4290aa8f..7caca057bc38 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -12,14 +12,13 @@
 static inline struct nouveau_bo *
 nouveau_gem_object(struct drm_gem_object *gem)
 {
-	return gem ? gem->driver_private : NULL;
+	return gem ? container_of(gem, struct nouveau_bo, gem) : NULL;
 }
 
 /* nouveau_gem.c */
 extern int nouveau_gem_new(struct drm_device *, int size, int align,
 			   uint32_t domain, uint32_t tile_mode,
 			   uint32_t tile_flags, struct nouveau_bo **);
-extern int nouveau_gem_object_new(struct drm_gem_object *);
 extern void nouveau_gem_object_del(struct drm_gem_object *);
 extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
 extern void nouveau_gem_object_close(struct drm_gem_object *,
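This accessor is the pivot of the embedded-GEM-object conversion running through the whole diff: struct nouveau_bo now contains its drm_gem_object directly instead of reaching it through the removed gem->driver_private pointer, and every "nvbo->gem" becomes "&nvbo->gem". The pattern in isolation, as a sketch with hypothetical struct names mirroring the nouveau layout:

	#include <linux/kernel.h>	/* container_of() */

	struct example_bo {
		/* ... ttm state ... */
		struct drm_gem_object gem;	/* embedded, not a pointer */
	};

	static inline struct example_bo *
	to_example_bo(struct drm_gem_object *gem)
	{
		/* recover the enclosing bo from a pointer to its embedded member */
		return gem ? container_of(gem, struct example_bo, gem) : NULL;
	}

One allocation, one lifetime: release is driven from the gem side, as nouveau_gem_object_del() above shows by clearing gem->filp before dropping the final ttm reference.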
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 936b442a6ab7..38a4db5bfe21 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -32,369 +32,12 @@
 #include <drm/drmP.h>
 
 #include "nouveau_drm.h"
-#include "nouveau_pm.h"
+#include "nouveau_hwmon.h"
 
 #include <subdev/gpio.h>
 #include <subdev/timer.h>
 #include <subdev/therm.h>
 
41MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
42static char *nouveau_perflvl;
43module_param_named(perflvl, nouveau_perflvl, charp, 0400);
44
45MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
46static int nouveau_perflvl_wr;
47module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
48
49static int
50nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
51 struct nouveau_pm_level *a, struct nouveau_pm_level *b)
52{
53 struct nouveau_drm *drm = nouveau_drm(dev);
54 struct nouveau_pm *pm = nouveau_pm(dev);
55 struct nouveau_therm *therm = nouveau_therm(drm->device);
56 int ret;
57
58 /*XXX: not on all boards, we should control based on temperature
59 * on recent boards.. or maybe on some other factor we don't
60 * know about?
61 */
62 if (therm && therm->fan_set &&
63 a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
64 ret = therm->fan_set(therm, perflvl->fanspeed);
65 if (ret && ret != -ENODEV) {
66 NV_ERROR(drm, "fanspeed set failed: %d\n", ret);
67 }
68 }
69
70 if (pm->voltage.supported && pm->voltage_set) {
71 if (perflvl->volt_min && b->volt_min > a->volt_min) {
72 ret = pm->voltage_set(dev, perflvl->volt_min);
73 if (ret) {
74 NV_ERROR(drm, "voltage set failed: %d\n", ret);
75 return ret;
76 }
77 }
78 }
79
80 return 0;
81}
82
83static int
84nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
85{
86 struct nouveau_pm *pm = nouveau_pm(dev);
87 void *state;
88 int ret;
89
90 if (perflvl == pm->cur)
91 return 0;
92
93 ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl);
94 if (ret)
95 return ret;
96
97 state = pm->clocks_pre(dev, perflvl);
98 if (IS_ERR(state)) {
99 ret = PTR_ERR(state);
100 goto error;
101 }
102 ret = pm->clocks_set(dev, state);
103 if (ret)
104 goto error;
105
106 ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
107 if (ret)
108 return ret;
109
110 pm->cur = perflvl;
111 return 0;
112
113error:
114 /* restore the fan speed and voltage before leaving */
115 nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
116 return ret;
117}
118
119void
120nouveau_pm_trigger(struct drm_device *dev)
121{
122 struct nouveau_drm *drm = nouveau_drm(dev);
123 struct nouveau_timer *ptimer = nouveau_timer(drm->device);
124 struct nouveau_pm *pm = nouveau_pm(dev);
125 struct nouveau_pm_profile *profile = NULL;
126 struct nouveau_pm_level *perflvl = NULL;
127 int ret;
128
129 /* select power profile based on current power source */
130 if (power_supply_is_system_supplied())
131 profile = pm->profile_ac;
132 else
133 profile = pm->profile_dc;
134
135 if (profile != pm->profile) {
136 pm->profile->func->fini(pm->profile);
137 pm->profile = profile;
138 pm->profile->func->init(pm->profile);
139 }
140
141 /* select performance level based on profile */
142 perflvl = profile->func->select(profile);
143
144 /* change perflvl, if necessary */
145 if (perflvl != pm->cur) {
146 u64 time0 = ptimer->read(ptimer);
147
148 NV_INFO(drm, "setting performance level: %d", perflvl->id);
149 ret = nouveau_pm_perflvl_set(dev, perflvl);
150 if (ret)
151 NV_INFO(drm, "> reclocking failed: %d\n\n", ret);
152
153 NV_INFO(drm, "> reclocking took %lluns\n\n",
154 ptimer->read(ptimer) - time0);
155 }
156}
157
158static struct nouveau_pm_profile *
159profile_find(struct drm_device *dev, const char *string)
160{
161 struct nouveau_pm *pm = nouveau_pm(dev);
162 struct nouveau_pm_profile *profile;
163
164 list_for_each_entry(profile, &pm->profiles, head) {
165 if (!strncmp(profile->name, string, sizeof(profile->name)))
166 return profile;
167 }
168
169 return NULL;
170}
171
172static int
173nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
174{
175 struct nouveau_pm *pm = nouveau_pm(dev);
176 struct nouveau_pm_profile *ac = NULL, *dc = NULL;
177 char string[16], *cur = string, *ptr;
178
179 /* safety precaution, for now */
180 if (nouveau_perflvl_wr != 7777)
181 return -EPERM;
182
183 strncpy(string, profile, sizeof(string));
184 string[sizeof(string) - 1] = 0;
185 if ((ptr = strchr(string, '\n')))
186 *ptr = '\0';
187
188 ptr = strsep(&cur, ",");
189 if (ptr)
190 ac = profile_find(dev, ptr);
191
192 ptr = strsep(&cur, ",");
193 if (ptr)
194 dc = profile_find(dev, ptr);
195 else
196 dc = ac;
197
198 if (ac == NULL || dc == NULL)
199 return -EINVAL;
200
201 pm->profile_ac = ac;
202 pm->profile_dc = dc;
203 nouveau_pm_trigger(dev);
204 return 0;
205}
206
207static void
208nouveau_pm_static_dummy(struct nouveau_pm_profile *profile)
209{
210}
211
212static struct nouveau_pm_level *
213nouveau_pm_static_select(struct nouveau_pm_profile *profile)
214{
215 return container_of(profile, struct nouveau_pm_level, profile);
216}
217
218const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = {
219 .destroy = nouveau_pm_static_dummy,
220 .init = nouveau_pm_static_dummy,
221 .fini = nouveau_pm_static_dummy,
222 .select = nouveau_pm_static_select,
223};
224
225static int
226nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
227{
228 struct nouveau_drm *drm = nouveau_drm(dev);
229 struct nouveau_pm *pm = nouveau_pm(dev);
230 struct nouveau_therm *therm = nouveau_therm(drm->device);
231 int ret;
232
233 memset(perflvl, 0, sizeof(*perflvl));
234
235 if (pm->clocks_get) {
236 ret = pm->clocks_get(dev, perflvl);
237 if (ret)
238 return ret;
239 }
240
241 if (pm->voltage.supported && pm->voltage_get) {
242 ret = pm->voltage_get(dev);
243 if (ret > 0) {
244 perflvl->volt_min = ret;
245 perflvl->volt_max = ret;
246 }
247 }
248
249 if (therm && therm->fan_get) {
250 ret = therm->fan_get(therm);
251 if (ret >= 0)
252 perflvl->fanspeed = ret;
253 }
254
255 nouveau_mem_timing_read(dev, &perflvl->timing);
256 return 0;
257}
258
259static void
260nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
261{
262 char c[16], s[16], v[32], f[16], m[16];
263
264 c[0] = '\0';
265 if (perflvl->core)
266 snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000);
267
268 s[0] = '\0';
269 if (perflvl->shader)
270 snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
271
272 m[0] = '\0';
273 if (perflvl->memory)
274 snprintf(m, sizeof(m), " memory %dMHz", perflvl->memory / 1000);
275
276 v[0] = '\0';
277 if (perflvl->volt_min && perflvl->volt_min != perflvl->volt_max) {
278 snprintf(v, sizeof(v), " voltage %dmV-%dmV",
279 perflvl->volt_min / 1000, perflvl->volt_max / 1000);
280 } else
281 if (perflvl->volt_min) {
282 snprintf(v, sizeof(v), " voltage %dmV",
283 perflvl->volt_min / 1000);
284 }
285
286 f[0] = '\0';
287 if (perflvl->fanspeed)
288 snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
289
290 snprintf(ptr, len, "%s%s%s%s%s\n", c, s, m, v, f);
291}
292
293static ssize_t
294nouveau_pm_get_perflvl_info(struct device *d,
295 struct device_attribute *a, char *buf)
296{
297 struct nouveau_pm_level *perflvl =
298 container_of(a, struct nouveau_pm_level, dev_attr);
299 char *ptr = buf;
300 int len = PAGE_SIZE;
301
302 snprintf(ptr, len, "%d:", perflvl->id);
303 ptr += strlen(buf);
304 len -= strlen(buf);
305
306 nouveau_pm_perflvl_info(perflvl, ptr, len);
307 return strlen(buf);
308}
309
310static ssize_t
311nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
312{
313 struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
314 struct nouveau_pm *pm = nouveau_pm(dev);
315 struct nouveau_pm_level cur;
316 int len = PAGE_SIZE, ret;
317 char *ptr = buf;
318
319 snprintf(ptr, len, "profile: %s, %s\nc:",
320 pm->profile_ac->name, pm->profile_dc->name);
321 ptr += strlen(buf);
322 len -= strlen(buf);
323
324 ret = nouveau_pm_perflvl_get(dev, &cur);
325 if (ret == 0)
326 nouveau_pm_perflvl_info(&cur, ptr, len);
327 return strlen(buf);
328}
329
330static ssize_t
331nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a,
332 const char *buf, size_t count)
333{
334 struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
335 int ret;
336
337 ret = nouveau_pm_profile_set(dev, buf);
338 if (ret)
339 return ret;
340 return strlen(buf);
341}
342
343static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR,
344 nouveau_pm_get_perflvl, nouveau_pm_set_perflvl);
345
346static int
347nouveau_sysfs_init(struct drm_device *dev)
348{
349 struct nouveau_drm *drm = nouveau_drm(dev);
350 struct nouveau_pm *pm = nouveau_pm(dev);
351 struct device *d = &dev->pdev->dev;
352 int ret, i;
353
354 ret = device_create_file(d, &dev_attr_performance_level);
355 if (ret)
356 return ret;
357
358 for (i = 0; i < pm->nr_perflvl; i++) {
359 struct nouveau_pm_level *perflvl = &pm->perflvl[i];
360
361 perflvl->dev_attr.attr.name = perflvl->name;
362 perflvl->dev_attr.attr.mode = S_IRUGO;
363 perflvl->dev_attr.show = nouveau_pm_get_perflvl_info;
364 perflvl->dev_attr.store = NULL;
365 sysfs_attr_init(&perflvl->dev_attr.attr);
366
367 ret = device_create_file(d, &perflvl->dev_attr);
368 if (ret) {
369 NV_ERROR(drm, "failed pervlvl %d sysfs: %d\n",
370 perflvl->id, i);
371 perflvl->dev_attr.attr.name = NULL;
372 nouveau_pm_fini(dev);
373 return ret;
374 }
375 }
376
377 return 0;
378}
379
380static void
381nouveau_sysfs_fini(struct drm_device *dev)
382{
383 struct nouveau_pm *pm = nouveau_pm(dev);
384 struct device *d = &dev->pdev->dev;
385 int i;
386
387 device_remove_file(d, &dev_attr_performance_level);
388 for (i = 0; i < pm->nr_perflvl; i++) {
389 struct nouveau_pm_level *pl = &pm->perflvl[i];
390
391 if (!pl->dev_attr.attr.name)
392 break;
393
394 device_remove_file(d, &pl->dev_attr);
395 }
396}
397
 #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
 static ssize_t
 nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
@@ -778,9 +421,6 @@ nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
 	int ret = -ENODEV;
 	long value;
 
-	if (nouveau_perflvl_wr != 7777)
-		return -EPERM;
-
 	if (kstrtol(buf, 10, &value) == -EINVAL)
 		return -EINVAL;
 
@@ -919,17 +559,21 @@ static const struct attribute_group hwmon_pwm_fan_attrgroup = {
 };
 #endif
 
-static int
+int
 nouveau_hwmon_init(struct drm_device *dev)
 {
-	struct nouveau_pm *pm = nouveau_pm(dev);
-
 #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	struct nouveau_hwmon *hwmon;
 	struct device *hwmon_dev;
 	int ret = 0;
 
+	hwmon = drm->hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
+	if (!hwmon)
+		return -ENOMEM;
+	hwmon->dev = dev;
+
 	if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
 		return -ENODEV;
 
@@ -976,199 +620,37 @@ nouveau_hwmon_init(struct drm_device *dev)
 		goto error;
 	}
 
-	pm->hwmon = hwmon_dev;
+	hwmon->hwmon = hwmon_dev;
 
 	return 0;
 
 error:
 	NV_ERROR(drm, "Unable to create some hwmon sysfs files: %d\n", ret);
 	hwmon_device_unregister(hwmon_dev);
-	pm->hwmon = NULL;
+	hwmon->hwmon = NULL;
 	return ret;
 #else
-	pm->hwmon = NULL;
+	hwmon->hwmon = NULL;
 	return 0;
 #endif
 }
 
-static void
+void
 nouveau_hwmon_fini(struct drm_device *dev)
 {
 #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
-	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nouveau_hwmon *hwmon = nouveau_hwmon(dev);
 
-	if (pm->hwmon) {
-		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup);
-		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup);
-		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
-		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
+	if (hwmon->hwmon) {
+		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_default_attrgroup);
+		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_temp_attrgroup);
+		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
+		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
 
-		hwmon_device_unregister(pm->hwmon);
+		hwmon_device_unregister(hwmon->hwmon);
 	}
-#endif
-}
-
-#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
-static int
-nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
-{
-	struct nouveau_pm *pm = container_of(nb, struct nouveau_pm, acpi_nb);
-	struct nouveau_drm *drm = nouveau_drm(pm->dev);
-	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
-
-	if (strcmp(entry->device_class, "ac_adapter") == 0) {
-		bool ac = power_supply_is_system_supplied();
 
-		NV_DEBUG(drm, "power supply changed: %s\n", ac ? "AC" : "DC");
-		nouveau_pm_trigger(pm->dev);
-	}
-
-	return NOTIFY_OK;
-}
+	nouveau_drm(dev)->hwmon = NULL;
+	kfree(hwmon);
 #endif
1029
1030int
1031nouveau_pm_init(struct drm_device *dev)
1032{
1033 struct nouveau_device *device = nouveau_dev(dev);
1034 struct nouveau_drm *drm = nouveau_drm(dev);
1035 struct nouveau_pm *pm;
1036 char info[256];
1037 int ret, i;
1038
1039 pm = drm->pm = kzalloc(sizeof(*pm), GFP_KERNEL);
1040 if (!pm)
1041 return -ENOMEM;
1042
1043 pm->dev = dev;
1044
1045 if (device->card_type < NV_40) {
1046 pm->clocks_get = nv04_pm_clocks_get;
1047 pm->clocks_pre = nv04_pm_clocks_pre;
1048 pm->clocks_set = nv04_pm_clocks_set;
1049 if (nouveau_gpio(drm->device)) {
1050 pm->voltage_get = nouveau_voltage_gpio_get;
1051 pm->voltage_set = nouveau_voltage_gpio_set;
1052 }
1053 } else
1054 if (device->card_type < NV_50) {
1055 pm->clocks_get = nv40_pm_clocks_get;
1056 pm->clocks_pre = nv40_pm_clocks_pre;
1057 pm->clocks_set = nv40_pm_clocks_set;
1058 pm->voltage_get = nouveau_voltage_gpio_get;
1059 pm->voltage_set = nouveau_voltage_gpio_set;
1060 } else
1061 if (device->card_type < NV_C0) {
1062 if (device->chipset < 0xa3 ||
1063 device->chipset == 0xaa ||
1064 device->chipset == 0xac) {
1065 pm->clocks_get = nv50_pm_clocks_get;
1066 pm->clocks_pre = nv50_pm_clocks_pre;
1067 pm->clocks_set = nv50_pm_clocks_set;
1068 } else {
1069 pm->clocks_get = nva3_pm_clocks_get;
1070 pm->clocks_pre = nva3_pm_clocks_pre;
1071 pm->clocks_set = nva3_pm_clocks_set;
1072 }
1073 pm->voltage_get = nouveau_voltage_gpio_get;
1074 pm->voltage_set = nouveau_voltage_gpio_set;
1075 } else
1076 if (device->card_type < NV_E0) {
1077 pm->clocks_get = nvc0_pm_clocks_get;
1078 pm->clocks_pre = nvc0_pm_clocks_pre;
1079 pm->clocks_set = nvc0_pm_clocks_set;
1080 pm->voltage_get = nouveau_voltage_gpio_get;
1081 pm->voltage_set = nouveau_voltage_gpio_set;
1082 }
1083
1084
1085 /* parse aux tables from vbios */
1086 nouveau_volt_init(dev);
1087
1088 INIT_LIST_HEAD(&pm->profiles);
1089
1090 /* determine current ("boot") performance level */
1091 ret = nouveau_pm_perflvl_get(dev, &pm->boot);
1092 if (ret) {
1093 NV_ERROR(drm, "failed to determine boot perflvl\n");
1094 return ret;
1095 }
1096
1097 strncpy(pm->boot.name, "boot", 4);
1098 strncpy(pm->boot.profile.name, "boot", 4);
1099 pm->boot.profile.func = &nouveau_pm_static_profile_func;
1100
1101 list_add(&pm->boot.profile.head, &pm->profiles);
1102
1103 pm->profile_ac = &pm->boot.profile;
1104 pm->profile_dc = &pm->boot.profile;
1105 pm->profile = &pm->boot.profile;
1106 pm->cur = &pm->boot;
1107
1108 /* add performance levels from vbios */
1109 nouveau_perf_init(dev);
1110
1111 /* display available performance levels */
1112 NV_INFO(drm, "%d available performance level(s)\n", pm->nr_perflvl);
1113 for (i = 0; i < pm->nr_perflvl; i++) {
1114 nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
1115 NV_INFO(drm, "%d:%s", pm->perflvl[i].id, info);
1116 }
1117
1118 nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
1119 NV_INFO(drm, "c:%s", info);
1120
1121 /* switch performance levels now if requested */
1122 if (nouveau_perflvl != NULL)
1123 nouveau_pm_profile_set(dev, nouveau_perflvl);
1124
1125 nouveau_sysfs_init(dev);
1126 nouveau_hwmon_init(dev);
1127#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
1128 pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
1129 register_acpi_notifier(&pm->acpi_nb);
1130#endif
1131
1132 return 0;
1133}
1134
1135void
1136nouveau_pm_fini(struct drm_device *dev)
1137{
1138 struct nouveau_pm *pm = nouveau_pm(dev);
1139 struct nouveau_pm_profile *profile, *tmp;
1140
1141 list_for_each_entry_safe(profile, tmp, &pm->profiles, head) {
1142 list_del(&profile->head);
1143 profile->func->destroy(profile);
1144 }
1145
1146 if (pm->cur != &pm->boot)
1147 nouveau_pm_perflvl_set(dev, &pm->boot);
1148
1149 nouveau_perf_fini(dev);
1150 nouveau_volt_fini(dev);
1151
1152#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
1153 unregister_acpi_notifier(&pm->acpi_nb);
1154#endif
1155 nouveau_hwmon_fini(dev);
1156 nouveau_sysfs_fini(dev);
1157
1158 nouveau_drm(dev)->pm = NULL;
1159 kfree(pm);
1160}
1161
1162void
1163nouveau_pm_resume(struct drm_device *dev)
1164{
1165 struct nouveau_pm *pm = nouveau_pm(dev);
1166 struct nouveau_pm_level *perflvl;
1167
1168 if (!pm->cur || pm->cur == &pm->boot)
1169 return;
1170
1171 perflvl = pm->cur;
1172 pm->cur = &pm->boot;
1173 nouveau_pm_perflvl_set(dev, perflvl);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.h b/drivers/gpu/drm/nouveau/nouveau_hwmon.h
new file mode 100644
index 000000000000..62ccbb39863c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_PM_H__
26#define __NOUVEAU_PM_H__
27
28struct nouveau_hwmon {
29 struct drm_device *dev;
30 struct device *hwmon;
31};
32
33static inline struct nouveau_hwmon *
34nouveau_hwmon(struct drm_device *dev)
35{
36 return nouveau_drm(dev)->hwmon;
37}
38
39/* nouveau_hwmon.c */
40int nouveau_hwmon_init(struct drm_device *dev);
41void nouveau_hwmon_fini(struct drm_device *dev);
42
43#endif
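A hedged usage sketch for the new header: hwmon state now hangs off nouveau_drm directly, and hwmon->hwmon is the registered hwmon class device (NULL when CONFIG_HWMON is off or registration failed). The example function below is hypothetical:

	static void hwmon_example(struct drm_device *dev)
	{
		struct nouveau_hwmon *hwmon = nouveau_hwmon(dev);

		/* hwmon->hwmon is a struct device *, so the usual dev_*
		 * logging helpers work on it */
		if (hwmon && hwmon->hwmon)
			dev_info(hwmon->hwmon, "nouveau hwmon device is registered\n");
	}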
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
deleted file mode 100644
index 697687593a81..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_hwsq.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_HWSQ_H__
26#define __NOUVEAU_HWSQ_H__
27
28struct hwsq_ucode {
29 u8 data[0x200];
30 union {
31 u8 *u08;
32 u16 *u16;
33 u32 *u32;
34 } ptr;
35 u16 len;
36
37 u32 reg;
38 u32 val;
39};
40
41static inline void
42hwsq_init(struct hwsq_ucode *hwsq)
43{
44 hwsq->ptr.u08 = hwsq->data;
45 hwsq->reg = 0xffffffff;
46 hwsq->val = 0xffffffff;
47}
48
49static inline void
50hwsq_fini(struct hwsq_ucode *hwsq)
51{
52 do {
53 *hwsq->ptr.u08++ = 0x7f;
54 hwsq->len = hwsq->ptr.u08 - hwsq->data;
55 } while (hwsq->len & 3);
56 hwsq->ptr.u08 = hwsq->data;
57}
58
59static inline void
60hwsq_usec(struct hwsq_ucode *hwsq, u8 usec)
61{
62 u32 shift = 0;
63 while (usec & ~3) {
64 usec >>= 2;
65 shift++;
66 }
67
68 *hwsq->ptr.u08++ = (shift << 2) | usec;
69}
70
71static inline void
72hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val)
73{
74 flag += 0x80;
75 if (val >= 0)
76 flag += 0x20;
77 if (val >= 1)
78 flag += 0x20;
79 *hwsq->ptr.u08++ = flag;
80}
81
82static inline void
83hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1)
84{
85 *hwsq->ptr.u08++ = 0x5f;
86 *hwsq->ptr.u08++ = v0;
87 *hwsq->ptr.u08++ = v1;
88}
89
90static inline void
91hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val)
92{
93 if (val != hwsq->val) {
94 if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) {
95 *hwsq->ptr.u08++ = 0x42;
96 *hwsq->ptr.u16++ = (val & 0x0000ffff);
97 } else {
98 *hwsq->ptr.u08++ = 0xe2;
99 *hwsq->ptr.u32++ = val;
100 }
101
102 hwsq->val = val;
103 }
104
105 if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) {
106 *hwsq->ptr.u08++ = 0x40;
107 *hwsq->ptr.u16++ = (reg & 0x0000ffff);
108 } else {
109 *hwsq->ptr.u08++ = 0xe0;
110 *hwsq->ptr.u32++ = reg;
111 }
112 hwsq->reg = reg;
113}
114
115#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
deleted file mode 100644
index 4f6a572f2258..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ /dev/null
@@ -1,647 +0,0 @@
1/*
2 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
3 * Copyright 2005 Stephane Marchesin
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Ben Skeggs <bskeggs@redhat.com>
30 * Roy Spliet <r.spliet@student.tudelft.nl>
31 */
32
33#include "nouveau_drm.h"
34#include "nouveau_pm.h"
35
36#include <subdev/fb.h>
37
38static int
39nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
40 struct nouveau_pm_tbl_entry *e, u8 len,
41 struct nouveau_pm_memtiming *boot,
42 struct nouveau_pm_memtiming *t)
43{
44 struct nouveau_drm *drm = nouveau_drm(dev);
45
46 t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
47
48 /* XXX: I don't trust the -1's and +1's... they must come
49 * from somewhere! */
50 t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
51 1 << 16 |
52 (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
53 (e->tCL + 2 - (t->tCWL - 1));
54
55 t->reg[2] = 0x20200000 |
56 ((t->tCWL - 1) << 24 |
57 e->tRRD << 16 |
58 e->tRCDWR << 8 |
59 e->tRCDRD);
60
61 NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x\n", t->id,
62 t->reg[0], t->reg[1], t->reg[2]);
63 return 0;
64}
65
66static int
67nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
68 struct nouveau_pm_tbl_entry *e, u8 len,
69 struct nouveau_pm_memtiming *boot,
70 struct nouveau_pm_memtiming *t)
71{
72 struct nouveau_device *device = nouveau_dev(dev);
73 struct nouveau_fb *pfb = nouveau_fb(device);
74 struct nouveau_drm *drm = nouveau_drm(dev);
75 struct bit_entry P;
76 uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;
77
78 if (bit_table(dev, 'P', &P))
79 return -EINVAL;
80
81 switch (min(len, (u8) 22)) {
82 case 22:
 83		unk21 = e->tUNK_21; /* fall through */
 84	case 21:
 85		unk20 = e->tUNK_20; /* fall through */
 86	case 20:
 87		if (e->tCWL > 0)
 88			t->tCWL = e->tCWL; /* fall through */
89 case 19:
90 unk18 = e->tUNK_18;
91 break;
92 }
93
94 t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
95
96 t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
97 max(unk18, (u8) 1) << 16 |
98 (e->tWTR + 2 + (t->tCWL - 1)) << 8;
99
100 t->reg[2] = ((t->tCWL - 1) << 24 |
101 e->tRRD << 16 |
102 e->tRCDWR << 8 |
103 e->tRCDRD);
104
105 t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;
106
107 t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);
108
109 t->reg[8] = boot->reg[8] & 0xffffff00;
110
111 if (P.version == 1) {
112 t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));
113
114 t->reg[3] = (0x14 + e->tCL) << 24 |
115 0x16 << 16 |
116 (e->tCL - 1) << 8 |
117 (e->tCL - 1);
118
119 t->reg[4] |= boot->reg[4] & 0xffff0000;
120
121 t->reg[6] = (0x33 - t->tCWL) << 16 |
122 t->tCWL << 8 |
123 (0x2e + e->tCL - t->tCWL);
124
125 t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;
126
127 /* XXX: P.version == 1 only has DDR2 and GDDR3? */
128 if (pfb->ram->type == NV_MEM_TYPE_DDR2) {
129 t->reg[5] |= (e->tCL + 3) << 8;
130 t->reg[6] |= (t->tCWL - 2) << 8;
131 t->reg[8] |= (e->tCL - 4);
132 } else {
133 t->reg[5] |= (e->tCL + 2) << 8;
134 t->reg[6] |= t->tCWL << 8;
135 t->reg[8] |= (e->tCL - 2);
136 }
137 } else {
138 t->reg[1] |= (5 + e->tCL - (t->tCWL));
139
140 /* XXX: 0xb? 0x30? */
141 t->reg[3] = (0x30 + e->tCL) << 24 |
142 (boot->reg[3] & 0x00ff0000)|
143 (0xb + e->tCL) << 8 |
144 (e->tCL - 1);
145
146 t->reg[4] |= (unk20 << 24 | unk21 << 16);
147
148 /* XXX: +6? */
149 t->reg[5] |= (t->tCWL + 6) << 8;
150
151 t->reg[6] = (0x5a + e->tCL) << 16 |
152 (6 - e->tCL + t->tCWL) << 8 |
153 (0x50 + e->tCL - t->tCWL);
154
155 tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
156 t->reg[7] = (tmp7_3 << 24) |
157 ((tmp7_3 - 6 + e->tCL) << 16) |
158 0x202;
159 }
160
161 NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
162 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
163 NV_DEBUG(drm, " 230: %08x %08x %08x %08x\n",
164 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
165 NV_DEBUG(drm, " 240: %08x\n", t->reg[8]);
166 return 0;
167}
168
169static int
170nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
171 struct nouveau_pm_tbl_entry *e, u8 len,
172 struct nouveau_pm_memtiming *boot,
173 struct nouveau_pm_memtiming *t)
174{
175 struct nouveau_drm *drm = nouveau_drm(dev);
176
177 if (e->tCWL > 0)
178 t->tCWL = e->tCWL;
179
180 t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
181 e->tRFC << 8 | e->tRC);
182
183 t->reg[1] = (boot->reg[1] & 0xff000000) |
184 (e->tRCDWR & 0x0f) << 20 |
185 (e->tRCDRD & 0x0f) << 14 |
186 (t->tCWL << 7) |
187 (e->tCL & 0x0f);
188
189 t->reg[2] = (boot->reg[2] & 0xff0000ff) |
190 e->tWR << 16 | e->tWTR << 8;
191
192 t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
193 (e->tUNK_21 & 0xf) << 5 |
194 (e->tUNK_13 & 0x1f);
195
196 t->reg[4] = (boot->reg[4] & 0xfff00fff) |
197 (e->tRRD&0x1f) << 15;
198
199 NV_DEBUG(drm, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
200 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
201 NV_DEBUG(drm, " 2a0: %08x\n", t->reg[4]);
202 return 0;
203}
204
 205/*
 206 * MR generation methods
 207 */
208
209static int
210nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
211 struct nouveau_pm_tbl_entry *e, u8 len,
212 struct nouveau_pm_memtiming *boot,
213 struct nouveau_pm_memtiming *t)
214{
215 struct nouveau_drm *drm = nouveau_drm(dev);
216
217 t->drive_strength = 0;
218 if (len < 15) {
219 t->odt = boot->odt;
220 } else {
221 t->odt = e->RAM_FT1 & 0x07;
222 }
223
224 if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
225 NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
226 return -ERANGE;
227 }
228
229 if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
230 NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
231 return -ERANGE;
232 }
233
234 if (t->odt > 3) {
235 NV_WARN(drm, "(%u) Invalid odt value, assuming disabled: %x",
236 t->id, t->odt);
237 t->odt = 0;
238 }
239
240 t->mr[0] = (boot->mr[0] & 0x100f) |
241 (e->tCL) << 4 |
242 (e->tWR - 1) << 9;
243 t->mr[1] = (boot->mr[1] & 0x101fbb) |
244 (t->odt & 0x1) << 2 |
245 (t->odt & 0x2) << 5;
246
247 NV_DEBUG(drm, "(%u) MR: %08x", t->id, t->mr[0]);
248 return 0;
249}
250
251static const uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
252 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
253
254static int
255nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
256 struct nouveau_pm_tbl_entry *e, u8 len,
257 struct nouveau_pm_memtiming *boot,
258 struct nouveau_pm_memtiming *t)
259{
260 struct nouveau_drm *drm = nouveau_drm(dev);
261 u8 cl = e->tCL - 4;
262
263 t->drive_strength = 0;
264 if (len < 15) {
265 t->odt = boot->odt;
266 } else {
267 t->odt = e->RAM_FT1 & 0x07;
268 }
269
270 if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
271 NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
272 return -ERANGE;
273 }
274
275 if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
276 NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
277 return -ERANGE;
278 }
279
280 if (e->tCWL < 5) {
281 NV_WARN(drm, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
282 return -ERANGE;
283 }
284
285 t->mr[0] = (boot->mr[0] & 0x180b) |
286 /* CAS */
287 (cl & 0x7) << 4 |
288 (cl & 0x8) >> 1 |
289 (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
290 t->mr[1] = (boot->mr[1] & 0x101dbb) |
291 (t->odt & 0x1) << 2 |
292 (t->odt & 0x2) << 5 |
293 (t->odt & 0x4) << 7;
294 t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;
295
296 NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
297 return 0;
298}
299
300static const uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
301 0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
302static const uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
303 0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
304
305static int
306nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
307 struct nouveau_pm_tbl_entry *e, u8 len,
308 struct nouveau_pm_memtiming *boot,
309 struct nouveau_pm_memtiming *t)
310{
311 struct nouveau_drm *drm = nouveau_drm(dev);
312
313 if (len < 15) {
314 t->drive_strength = boot->drive_strength;
315 t->odt = boot->odt;
316 } else {
317 t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
318 t->odt = e->RAM_FT1 & 0x07;
319 }
320
321 if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
322 NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
323 return -ERANGE;
324 }
325
326 if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
327 NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
328 return -ERANGE;
329 }
330
331 if (t->odt > 3) {
332 NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
333 t->id, t->odt);
334 t->odt = 0;
335 }
336
337 t->mr[0] = (boot->mr[0] & 0xe0b) |
338 /* CAS */
339 ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
340 ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
341 t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
342 (t->odt << 2) |
343 (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
344 t->mr[2] = boot->mr[2];
345
346 NV_DEBUG(drm, "(%u) MR: %08x %08x %08x", t->id,
347 t->mr[0], t->mr[1], t->mr[2]);
348 return 0;
349}
350
351static int
352nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
353 struct nouveau_pm_tbl_entry *e, u8 len,
354 struct nouveau_pm_memtiming *boot,
355 struct nouveau_pm_memtiming *t)
356{
357 struct nouveau_drm *drm = nouveau_drm(dev);
358
359 if (len < 15) {
360 t->drive_strength = boot->drive_strength;
361 t->odt = boot->odt;
362 } else {
363 t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
364 t->odt = e->RAM_FT1 & 0x03;
365 }
366
367 if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
368 NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
369 return -ERANGE;
370 }
371
372 if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
373 NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
374 return -ERANGE;
375 }
376
377 if (t->odt > 3) {
378 NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
379 t->id, t->odt);
380 t->odt = 0;
381 }
382
383 t->mr[0] = (boot->mr[0] & 0x007) |
384 ((e->tCL - 5) << 3) |
385 ((e->tWR - 4) << 8);
386 t->mr[1] = (boot->mr[1] & 0x1007f0) |
387 t->drive_strength |
388 (t->odt << 2);
389
390 NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
391 return 0;
392}
393
394int
395nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
396 struct nouveau_pm_memtiming *t)
397{
398 struct nouveau_device *device = nouveau_dev(dev);
399 struct nouveau_fb *pfb = nouveau_fb(device);
400 struct nouveau_pm *pm = nouveau_pm(dev);
401 struct nouveau_pm_memtiming *boot = &pm->boot.timing;
402 struct nouveau_pm_tbl_entry *e;
403 u8 ver, len, *ptr, *ramcfg;
404 int ret;
405
406 ptr = nouveau_perf_timing(dev, freq, &ver, &len);
407 if (!ptr || ptr[0] == 0x00) {
408 *t = *boot;
409 return 0;
410 }
411 e = (struct nouveau_pm_tbl_entry *)ptr;
412
413 t->tCWL = boot->tCWL;
414
415 switch (device->card_type) {
416 case NV_40:
417 ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
418 break;
419 case NV_50:
420 ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
421 break;
422 case NV_C0:
423 case NV_D0:
424 ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
425 break;
426 default:
427 ret = -ENODEV;
428 break;
429 }
430
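	/* multiplying by !ret forces the switch to 0 when the timing calc
	 * above failed, so MR generation is skipped and the default case
	 * replaces ret with -EINVAL
	 */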
431 switch (pfb->ram->type * !ret) {
432 case NV_MEM_TYPE_GDDR3:
433 ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
434 break;
435 case NV_MEM_TYPE_GDDR5:
436 ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
437 break;
438 case NV_MEM_TYPE_DDR2:
439 ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
440 break;
441 case NV_MEM_TYPE_DDR3:
442 ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
443 break;
444 default:
445 ret = -EINVAL;
446 break;
447 }
448
449 ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
450 if (ramcfg) {
451 int dll_off;
452
453 if (ver == 0x00)
454 dll_off = !!(ramcfg[3] & 0x04);
455 else
456 dll_off = !!(ramcfg[2] & 0x40);
457
458 switch (pfb->ram->type) {
459 case NV_MEM_TYPE_GDDR3:
460 t->mr[1] &= ~0x00000040;
461 t->mr[1] |= 0x00000040 * dll_off;
462 break;
463 default:
464 t->mr[1] &= ~0x00000001;
465 t->mr[1] |= 0x00000001 * dll_off;
466 break;
467 }
468 }
469
470 return ret;
471}
472
473void
474nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
475{
476 struct nouveau_device *device = nouveau_dev(dev);
477 struct nouveau_fb *pfb = nouveau_fb(device);
478 u32 timing_base, timing_regs, mr_base;
479 int i;
480
481 if (device->card_type >= 0xC0) {
482 timing_base = 0x10f290;
483 mr_base = 0x10f300;
484 } else {
485 timing_base = 0x100220;
486 mr_base = 0x1002c0;
487 }
488
489 t->id = -1;
490
491 switch (device->card_type) {
492 case NV_50:
493 timing_regs = 9;
494 break;
495 case NV_C0:
496 case NV_D0:
497 timing_regs = 5;
498 break;
499 case NV_30:
500 case NV_40:
501 timing_regs = 3;
502 break;
 503	default:
 504		/* unknown chipset: no timing registers to read */
 505		return;
506 }
 507	for (i = 0; i < timing_regs; i++)
508 t->reg[i] = nv_rd32(device, timing_base + (0x04 * i));
509
510 t->tCWL = 0;
511 if (device->card_type < NV_C0) {
512 t->tCWL = ((nv_rd32(device, 0x100228) & 0x0f000000) >> 24) + 1;
513 } else if (device->card_type <= NV_D0) {
514 t->tCWL = ((nv_rd32(device, 0x10f294) & 0x00000f80) >> 7);
515 }
516
517 t->mr[0] = nv_rd32(device, mr_base);
518 t->mr[1] = nv_rd32(device, mr_base + 0x04);
519 t->mr[2] = nv_rd32(device, mr_base + 0x20);
520 t->mr[3] = nv_rd32(device, mr_base + 0x24);
521
522 t->odt = 0;
523 t->drive_strength = 0;
524
525 switch (pfb->ram->type) {
526 case NV_MEM_TYPE_DDR3:
 527		t->odt |= (t->mr[1] & 0x200) >> 7; /* fall through */
528 case NV_MEM_TYPE_DDR2:
529 t->odt |= (t->mr[1] & 0x04) >> 2 |
530 (t->mr[1] & 0x40) >> 5;
531 break;
532 case NV_MEM_TYPE_GDDR3:
533 case NV_MEM_TYPE_GDDR5:
534 t->drive_strength = t->mr[1] & 0x03;
535 t->odt = (t->mr[1] & 0x0c) >> 2;
536 break;
537 default:
538 break;
539 }
540}
541
542int
543nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
544 struct nouveau_pm_level *perflvl)
545{
546 struct nouveau_drm *drm = nouveau_drm(exec->dev);
547 struct nouveau_device *device = nouveau_dev(exec->dev);
548 struct nouveau_fb *pfb = nouveau_fb(device);
549 struct nouveau_pm_memtiming *info = &perflvl->timing;
550 u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
551 u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
552 u32 mr1_dlloff;
553
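	/* per-memtype minimum delays, in nanoseconds (exec->wait takes nsec) */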
554 switch (pfb->ram->type) {
555 case NV_MEM_TYPE_DDR2:
556 tDLLK = 2000;
557 mr1_dlloff = 0x00000001;
558 break;
559 case NV_MEM_TYPE_DDR3:
560 tDLLK = 12000;
561 tCKSRE = 2000;
562 tXS = 1000;
563 mr1_dlloff = 0x00000001;
564 break;
565 case NV_MEM_TYPE_GDDR3:
566 tDLLK = 40000;
567 mr1_dlloff = 0x00000040;
568 break;
569 default:
570 NV_ERROR(drm, "cannot reclock unsupported memtype\n");
571 return -ENODEV;
572 }
573
574 /* fetch current MRs */
575 switch (pfb->ram->type) {
576 case NV_MEM_TYPE_GDDR3:
577 case NV_MEM_TYPE_DDR3:
 578		mr[2] = exec->mrg(exec, 2); /* fall through */
579 default:
580 mr[1] = exec->mrg(exec, 1);
581 mr[0] = exec->mrg(exec, 0);
582 break;
583 }
584
585 /* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */
586 if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
587 exec->precharge(exec);
588 exec->mrs (exec, 1, mr[1] | mr1_dlloff);
589 exec->wait(exec, tMRD);
590 }
591
592 /* enter self-refresh mode */
593 exec->precharge(exec);
594 exec->refresh(exec);
595 exec->refresh(exec);
596 exec->refresh_auto(exec, false);
597 exec->refresh_self(exec, true);
598 exec->wait(exec, tCKSRE);
599
600 /* modify input clock frequency */
601 exec->clock_set(exec);
602
603 /* exit self-refresh mode */
604 exec->wait(exec, tCKSRX);
605 exec->precharge(exec);
606 exec->refresh_self(exec, false);
607 exec->refresh_auto(exec, true);
608 exec->wait(exec, tXS);
609 exec->wait(exec, tXS);
610
611 /* update MRs */
612 if (mr[2] != info->mr[2]) {
613 exec->mrs (exec, 2, info->mr[2]);
614 exec->wait(exec, tMRD);
615 }
616
617 if (mr[1] != info->mr[1]) {
618 /* need to keep DLL off until later, at least on GDDR3 */
619 exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
620 exec->wait(exec, tMRD);
621 }
622
623 if (mr[0] != info->mr[0]) {
624 exec->mrs (exec, 0, info->mr[0]);
625 exec->wait(exec, tMRD);
626 }
627
628 /* update PFB timing registers */
629 exec->timing_set(exec);
630
631 /* DLL (enable + ) reset */
632 if (!(info->mr[1] & mr1_dlloff)) {
633 if (mr[1] & mr1_dlloff) {
634 exec->mrs (exec, 1, info->mr[1]);
635 exec->wait(exec, tMRD);
636 }
637 exec->mrs (exec, 0, info->mr[0] | 0x00000100);
638 exec->wait(exec, tMRD);
639 exec->mrs (exec, 0, info->mr[0] | 0x00000000);
640 exec->wait(exec, tMRD);
641 exec->wait(exec, tDLLK);
642 if (pfb->ram->type == NV_MEM_TYPE_GDDR3)
643 exec->precharge(exec);
644 }
645
646 return 0;
647}
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
deleted file mode 100644
index 4fe883c54918..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ /dev/null
@@ -1,416 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26
27#include "nouveau_drm.h"
28#include "nouveau_reg.h"
29#include "nouveau_pm.h"
30
31static u8 *
32nouveau_perf_table(struct drm_device *dev, u8 *ver)
33{
34 struct nouveau_drm *drm = nouveau_drm(dev);
35 struct nvbios *bios = &drm->vbios;
36 struct bit_entry P;
37
38 if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) {
39 u8 *perf = ROMPTR(dev, P.data[0]);
40 if (perf) {
41 *ver = perf[0];
42 return perf;
43 }
44 }
45
46 if (bios->type == NVBIOS_BMP) {
47 if (bios->data[bios->offset + 6] >= 0x25) {
48 u8 *perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
49 if (perf) {
50 *ver = perf[1];
51 return perf;
52 }
53 }
54 }
55
56 return NULL;
57}
58
59static u8 *
60nouveau_perf_entry(struct drm_device *dev, int idx,
61 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
62{
63 u8 *perf = nouveau_perf_table(dev, ver);
64 if (perf) {
65 if (*ver >= 0x12 && *ver < 0x20 && idx < perf[2]) {
66 *hdr = perf[3];
67 *cnt = 0;
68 *len = 0;
69 return perf + perf[0] + idx * perf[3];
70 } else
71 if (*ver >= 0x20 && *ver < 0x40 && idx < perf[2]) {
72 *hdr = perf[3];
73 *cnt = perf[4];
74 *len = perf[5];
75 return perf + perf[1] + idx * (*hdr + (*cnt * *len));
76 } else
77 if (*ver >= 0x40 && *ver < 0x41 && idx < perf[5]) {
78 *hdr = perf[2];
79 *cnt = perf[4];
80 *len = perf[3];
81 return perf + perf[1] + idx * (*hdr + (*cnt * *len));
82 }
83 }
84 return NULL;
85}
86
87u8 *
88nouveau_perf_rammap(struct drm_device *dev, u32 freq,
89 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
90{
91 struct nouveau_drm *drm = nouveau_drm(dev);
92 struct bit_entry P;
93 u8 *perf, i = 0;
94
95 if (!bit_table(dev, 'P', &P) && P.version == 2) {
96 u8 *rammap = ROMPTR(dev, P.data[4]);
97 if (rammap) {
98 u8 *ramcfg = rammap + rammap[1];
99
100 *ver = rammap[0];
101 *hdr = rammap[2];
102 *cnt = rammap[4];
103 *len = rammap[3];
104
105 freq /= 1000;
106 for (i = 0; i < rammap[5]; i++) {
107 if (freq >= ROM16(ramcfg[0]) &&
108 freq <= ROM16(ramcfg[2]))
109 return ramcfg;
110
111 ramcfg += *hdr + (*cnt * *len);
112 }
113 }
114
115 return NULL;
116 }
117
118 if (nv_device(drm->device)->chipset == 0x49 ||
119 nv_device(drm->device)->chipset == 0x4b)
120 freq /= 2;
121
122 while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) {
123 if (*ver >= 0x20 && *ver < 0x25) {
124 if (perf[0] != 0xff && freq <= ROM16(perf[11]) * 1000)
125 break;
126 } else
127 if (*ver >= 0x25 && *ver < 0x40) {
128 if (perf[0] != 0xff && freq <= ROM16(perf[12]) * 1000)
129 break;
130 }
131 }
132
133 if (perf) {
134 u8 *ramcfg = perf + *hdr;
135 *ver = 0x00;
136 *hdr = 0;
137 return ramcfg;
138 }
139
140 return NULL;
141}
142
143u8 *
144nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
145{
146 struct nouveau_device *device = nouveau_dev(dev);
147 struct nouveau_drm *drm = nouveau_drm(dev);
148 struct nvbios *bios = &drm->vbios;
149 u8 strap, hdr, cnt;
150 u8 *rammap;
151
152 strap = (nv_rd32(device, 0x101000) & 0x0000003c) >> 2;
153 if (bios->ram_restrict_tbl_ptr)
154 strap = bios->data[bios->ram_restrict_tbl_ptr + strap];
155
156 rammap = nouveau_perf_rammap(dev, freq, ver, &hdr, &cnt, len);
157 if (rammap && strap < cnt)
158 return rammap + hdr + (strap * *len);
159
160 return NULL;
161}
162
163u8 *
164nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
165{
166 struct nouveau_drm *drm = nouveau_drm(dev);
167 struct nvbios *bios = &drm->vbios;
168 struct bit_entry P;
169 u8 *perf, *timing = NULL;
170 u8 i = 0, hdr, cnt;
171
172 if (bios->type == NVBIOS_BMP) {
173 while ((perf = nouveau_perf_entry(dev, i++, ver, &hdr, &cnt,
174 len)) && *ver == 0x15) {
175 if (freq <= ROM32(perf[5]) * 20) {
176 *ver = 0x00;
177 *len = 14;
178 return perf + 41;
179 }
180 }
181 return NULL;
182 }
183
184 if (!bit_table(dev, 'P', &P)) {
185 if (P.version == 1)
186 timing = ROMPTR(dev, P.data[4]);
187 else
188 if (P.version == 2)
189 timing = ROMPTR(dev, P.data[8]);
190 }
191
192 if (timing && timing[0] == 0x10) {
193 u8 *ramcfg = nouveau_perf_ramcfg(dev, freq, ver, len);
194 if (ramcfg && ramcfg[1] < timing[2]) {
195 *ver = timing[0];
196 *len = timing[3];
197 return timing + timing[1] + (ramcfg[1] * timing[3]);
198 }
199 }
200
201 return NULL;
202}
203
204static void
205legacy_perf_init(struct drm_device *dev)
206{
207 struct nouveau_device *device = nouveau_dev(dev);
208 struct nouveau_drm *drm = nouveau_drm(dev);
209 struct nvbios *bios = &drm->vbios;
210 struct nouveau_pm *pm = nouveau_pm(dev);
211 char *perf, *entry, *bmp = &bios->data[bios->offset];
212 int headerlen, use_straps;
213
214 if (bmp[5] < 0x5 || bmp[6] < 0x14) {
215 NV_DEBUG(drm, "BMP version too old for perf\n");
216 return;
217 }
218
219 perf = ROMPTR(dev, bmp[0x73]);
220 if (!perf) {
221 NV_DEBUG(drm, "No memclock table pointer found.\n");
222 return;
223 }
224
225 switch (perf[0]) {
226 case 0x12:
227 case 0x14:
228 case 0x18:
229 use_straps = 0;
230 headerlen = 1;
231 break;
232 case 0x01:
233 use_straps = perf[1] & 1;
234 headerlen = (use_straps ? 8 : 2);
235 break;
236 default:
237 NV_WARN(drm, "Unknown memclock table version %x.\n", perf[0]);
238 return;
239 }
240
241 entry = perf + headerlen;
242 if (use_straps)
243 entry += (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
244
245 sprintf(pm->perflvl[0].name, "performance_level_0");
246 pm->perflvl[0].memory = ROM16(entry[0]) * 20;
247 pm->nr_perflvl = 1;
248}
249
250static void
251nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
252{
253 struct nouveau_drm *drm = nouveau_drm(dev);
254 struct bit_entry P;
255 u8 *vmap;
256 int id;
257
258 id = perflvl->volt_min;
259 perflvl->volt_min = 0;
260
261 /* boards using voltage table version <0x40 store the voltage
262 * level directly in the perflvl entry as a multiple of 10mV
263 */
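	/* e.g. a stored id of 110 yields 110 * 10000 = 1100000 uV (1.10 V) */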
264 if (drm->pm->voltage.version < 0x40) {
265 perflvl->volt_min = id * 10000;
266 perflvl->volt_max = perflvl->volt_min;
267 return;
268 }
269
270 /* on newer ones, the perflvl stores an index into yet another
271 * vbios table containing a min/max voltage value for the perflvl
272 */
273 if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) {
274 NV_DEBUG(drm, "where's our volt map table ptr? %d %d\n",
275 P.version, P.length);
276 return;
277 }
278
279 vmap = ROMPTR(dev, P.data[32]);
280 if (!vmap) {
281 NV_DEBUG(drm, "volt map table pointer invalid\n");
282 return;
283 }
284
285 if (id < vmap[3]) {
286 vmap += vmap[1] + (vmap[2] * id);
287 perflvl->volt_min = ROM32(vmap[0]);
288 perflvl->volt_max = ROM32(vmap[4]);
289 }
290}
291
292void
293nouveau_perf_init(struct drm_device *dev)
294{
295 struct nouveau_drm *drm = nouveau_drm(dev);
296 struct nouveau_pm *pm = nouveau_pm(dev);
297 struct nvbios *bios = &drm->vbios;
298 u8 *perf, ver, hdr, cnt, len;
299 int ret, vid, i = -1;
300
301 if (bios->type == NVBIOS_BMP && bios->data[bios->offset + 6] < 0x25) {
302 legacy_perf_init(dev);
303 return;
304 }
305
306 perf = nouveau_perf_table(dev, &ver);
307
308 while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) {
309 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
310
311 if (perf[0] == 0xff)
312 continue;
313
314 switch (ver) {
315 case 0x12:
316 case 0x13:
317 case 0x15:
318 perflvl->fanspeed = perf[55];
319 if (hdr > 56)
320 perflvl->volt_min = perf[56];
321 perflvl->core = ROM32(perf[1]) * 10;
322 perflvl->memory = ROM32(perf[5]) * 20;
323 break;
324 case 0x21:
325 case 0x23:
326 case 0x24:
327 perflvl->fanspeed = perf[4];
328 perflvl->volt_min = perf[5];
329 perflvl->shader = ROM16(perf[6]) * 1000;
330 perflvl->core = perflvl->shader;
331 perflvl->core += (signed char)perf[8] * 1000;
332 if (nv_device(drm->device)->chipset == 0x49 ||
333 nv_device(drm->device)->chipset == 0x4b)
334 perflvl->memory = ROM16(perf[11]) * 1000;
335 else
336 perflvl->memory = ROM16(perf[11]) * 2000;
337 break;
338 case 0x25:
339 perflvl->fanspeed = perf[4];
340 perflvl->volt_min = perf[5];
341 perflvl->core = ROM16(perf[6]) * 1000;
342 perflvl->shader = ROM16(perf[10]) * 1000;
343 perflvl->memory = ROM16(perf[12]) * 1000;
344 break;
345 case 0x30:
 346			perflvl->memscript = ROM16(perf[2]); /* fall through */
347 case 0x35:
348 perflvl->fanspeed = perf[6];
349 perflvl->volt_min = perf[7];
350 perflvl->core = ROM16(perf[8]) * 1000;
351 perflvl->shader = ROM16(perf[10]) * 1000;
352 perflvl->memory = ROM16(perf[12]) * 1000;
353 perflvl->vdec = ROM16(perf[16]) * 1000;
354 perflvl->dom6 = ROM16(perf[20]) * 1000;
355 break;
356 case 0x40:
357#define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000)
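/* each subentry's low 12 bits appear to be a clock in MHz, scaled to kHz */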
358 perflvl->fanspeed = 0; /*XXX*/
359 perflvl->volt_min = perf[2];
360 if (nv_device(drm->device)->card_type == NV_50) {
361 perflvl->core = subent(0);
362 perflvl->shader = subent(1);
363 perflvl->memory = subent(2);
364 perflvl->vdec = subent(3);
365 perflvl->unka0 = subent(4);
366 } else {
367 perflvl->hub06 = subent(0);
368 perflvl->hub01 = subent(1);
369 perflvl->copy = subent(2);
370 perflvl->shader = subent(3);
371 perflvl->rop = subent(4);
372 perflvl->memory = subent(5);
373 perflvl->vdec = subent(6);
374 perflvl->daemon = subent(10);
375 perflvl->hub07 = subent(11);
376 perflvl->core = perflvl->shader / 2;
377 }
378 break;
379 }
380
381 /* make sure vid is valid */
382 nouveau_perf_voltage(dev, perflvl);
383 if (pm->voltage.supported && perflvl->volt_min) {
384 vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
385 if (vid < 0) {
386 NV_DEBUG(drm, "perflvl %d, bad vid\n", i);
387 continue;
388 }
389 }
390
391 /* get the corresponding memory timings */
392 ret = nouveau_mem_timing_calc(dev, perflvl->memory,
393 &perflvl->timing);
394 if (ret) {
395 NV_DEBUG(drm, "perflvl %d, bad timing: %d\n", i, ret);
396 continue;
397 }
398
399 snprintf(perflvl->name, sizeof(perflvl->name),
400 "performance_level_%d", i);
401 perflvl->id = i;
402
403 snprintf(perflvl->profile.name, sizeof(perflvl->profile.name),
404 "%d", perflvl->id);
405 perflvl->profile.func = &nouveau_pm_static_profile_func;
406 list_add_tail(&perflvl->profile.head, &pm->profiles);
407
408
409 pm->nr_perflvl++;
410 }
411}
412
413void
414nouveau_perf_fini(struct drm_device *dev)
415{
416}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
deleted file mode 100644
index 73b789c230a9..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ /dev/null
@@ -1,283 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_PM_H__
26#define __NOUVEAU_PM_H__
27
28#include <subdev/bios/pll.h>
29#include <subdev/clock.h>
30
31struct nouveau_pm_voltage_level {
32 u32 voltage; /* microvolts */
33 u8 vid;
34};
35
36struct nouveau_pm_voltage {
37 bool supported;
38 u8 version;
39 u8 vid_mask;
40
41 struct nouveau_pm_voltage_level *level;
42 int nr_level;
43};
44
45/* Exclusive upper limits */
46#define NV_MEM_CL_DDR2_MAX 8
47#define NV_MEM_WR_DDR2_MAX 9
48#define NV_MEM_CL_DDR3_MAX 17
49#define NV_MEM_WR_DDR3_MAX 17
50#define NV_MEM_CL_GDDR3_MAX 16
51#define NV_MEM_WR_GDDR3_MAX 18
52#define NV_MEM_CL_GDDR5_MAX 21
53#define NV_MEM_WR_GDDR5_MAX 20
54
55struct nouveau_pm_memtiming {
56 int id;
57
58 u32 reg[9];
59 u32 mr[4];
60
61 u8 tCWL;
62
63 u8 odt;
64 u8 drive_strength;
65};
66
67struct nouveau_pm_tbl_header {
68 u8 version;
69 u8 header_len;
70 u8 entry_cnt;
71 u8 entry_len;
72};
73
74struct nouveau_pm_tbl_entry {
75 u8 tWR;
76 u8 tWTR;
77 u8 tCL;
78 u8 tRC;
79 u8 empty_4;
80 u8 tRFC; /* Byte 5 */
81 u8 empty_6;
82 u8 tRAS; /* Byte 7 */
83 u8 empty_8;
84 u8 tRP; /* Byte 9 */
85 u8 tRCDRD;
86 u8 tRCDWR;
87 u8 tRRD;
88 u8 tUNK_13;
89 u8 RAM_FT1; /* 14, a bitmask of random RAM features */
90 u8 empty_15;
91 u8 tUNK_16;
92 u8 empty_17;
93 u8 tUNK_18;
94 u8 tCWL;
95 u8 tUNK_20, tUNK_21;
96};
97
98struct nouveau_pm_profile;
99struct nouveau_pm_profile_func {
100 void (*destroy)(struct nouveau_pm_profile *);
101 void (*init)(struct nouveau_pm_profile *);
102 void (*fini)(struct nouveau_pm_profile *);
103 struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
104};
105
106struct nouveau_pm_profile {
107 const struct nouveau_pm_profile_func *func;
108 struct list_head head;
109 char name[8];
110};
111
112#define NOUVEAU_PM_MAX_LEVEL 8
113struct nouveau_pm_level {
114 struct nouveau_pm_profile profile;
115 struct device_attribute dev_attr;
116 char name[32];
117 int id;
118
119 struct nouveau_pm_memtiming timing;
120 u32 memory;
121 u16 memscript;
122
123 u32 core;
124 u32 shader;
125 u32 rop;
126 u32 copy;
127 u32 daemon;
128 u32 vdec;
129 u32 dom6;
130 u32 unka0; /* nva3:nvc0 */
131 u32 hub01; /* nvc0- */
132 u32 hub06; /* nvc0- */
133 u32 hub07; /* nvc0- */
134
135 u32 volt_min; /* microvolts */
136 u32 volt_max;
137 u8 fanspeed;
138};
139
140struct nouveau_pm_temp_sensor_constants {
141 u16 offset_constant;
142 s16 offset_mult;
143 s16 offset_div;
144 s16 slope_mult;
145 s16 slope_div;
146};
147
148struct nouveau_pm_threshold_temp {
149 s16 critical;
150 s16 down_clock;
151};
152
153struct nouveau_pm {
154 struct drm_device *dev;
155
156 struct nouveau_pm_voltage voltage;
157 struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
158 int nr_perflvl;
159 struct nouveau_pm_temp_sensor_constants sensor_constants;
160 struct nouveau_pm_threshold_temp threshold_temp;
161
162 struct nouveau_pm_profile *profile_ac;
163 struct nouveau_pm_profile *profile_dc;
164 struct nouveau_pm_profile *profile;
165 struct list_head profiles;
166
167 struct nouveau_pm_level boot;
168 struct nouveau_pm_level *cur;
169
170 struct device *hwmon;
171 struct notifier_block acpi_nb;
172
173 int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
174 void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
175 int (*clocks_set)(struct drm_device *, void *);
176
177 int (*voltage_get)(struct drm_device *);
178 int (*voltage_set)(struct drm_device *, int voltage);
179};
180
181static inline struct nouveau_pm *
182nouveau_pm(struct drm_device *dev)
183{
184 return nouveau_drm(dev)->pm;
185}
186
187struct nouveau_mem_exec_func {
188 struct drm_device *dev;
189 void (*precharge)(struct nouveau_mem_exec_func *);
190 void (*refresh)(struct nouveau_mem_exec_func *);
191 void (*refresh_auto)(struct nouveau_mem_exec_func *, bool);
192 void (*refresh_self)(struct nouveau_mem_exec_func *, bool);
193 void (*wait)(struct nouveau_mem_exec_func *, u32 nsec);
194 u32 (*mrg)(struct nouveau_mem_exec_func *, int mr);
195 void (*mrs)(struct nouveau_mem_exec_func *, int mr, u32 data);
196 void (*clock_set)(struct nouveau_mem_exec_func *);
197 void (*timing_set)(struct nouveau_mem_exec_func *);
198 void *priv;
199};
200
201/* nouveau_mem.c */
202int nouveau_mem_exec(struct nouveau_mem_exec_func *,
203 struct nouveau_pm_level *);
204
205/* nouveau_pm.c */
206int nouveau_pm_init(struct drm_device *dev);
207void nouveau_pm_fini(struct drm_device *dev);
208void nouveau_pm_resume(struct drm_device *dev);
209extern const struct nouveau_pm_profile_func nouveau_pm_static_profile_func;
210void nouveau_pm_trigger(struct drm_device *dev);
211
212/* nouveau_volt.c */
213void nouveau_volt_init(struct drm_device *);
214void nouveau_volt_fini(struct drm_device *);
215int nouveau_volt_vid_lookup(struct drm_device *, int voltage);
216int nouveau_volt_lvl_lookup(struct drm_device *, int vid);
217int nouveau_voltage_gpio_get(struct drm_device *);
218int nouveau_voltage_gpio_set(struct drm_device *, int voltage);
219
220/* nouveau_perf.c */
221void nouveau_perf_init(struct drm_device *);
222void nouveau_perf_fini(struct drm_device *);
223u8 *nouveau_perf_rammap(struct drm_device *, u32 freq, u8 *ver,
224 u8 *hdr, u8 *cnt, u8 *len);
225u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len);
226u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
227
228/* nouveau_mem.c */
229void nouveau_mem_timing_init(struct drm_device *);
230void nouveau_mem_timing_fini(struct drm_device *);
231
232/* nv04_pm.c */
233int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
234void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
235int nv04_pm_clocks_set(struct drm_device *, void *);
236
237/* nv40_pm.c */
238int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
239void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
240int nv40_pm_clocks_set(struct drm_device *, void *);
241int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
242int nv40_pm_pwm_set(struct drm_device *, int, u32, u32);
243
244/* nv50_pm.c */
245int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
246void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
247int nv50_pm_clocks_set(struct drm_device *, void *);
248int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
249int nv50_pm_pwm_set(struct drm_device *, int, u32, u32);
250
251/* nva3_pm.c */
252int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
253void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
254int nva3_pm_clocks_set(struct drm_device *, void *);
255
256/* nvc0_pm.c */
257int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
258void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
259int nvc0_pm_clocks_set(struct drm_device *, void *);
260
261/* nouveau_mem.c */
262int nouveau_mem_timing_calc(struct drm_device *, u32 freq,
263 struct nouveau_pm_memtiming *);
264void nouveau_mem_timing_read(struct drm_device *,
265 struct nouveau_pm_memtiming *);
266
267static inline int
268nva3_calc_pll(struct drm_device *dev, struct nvbios_pll *pll, u32 freq,
269 int *N, int *fN, int *M, int *P)
270{
271 struct nouveau_device *device = nouveau_dev(dev);
272 struct nouveau_clock *clk = nouveau_clock(device);
273 struct nouveau_pll_vals pv;
274 int ret;
275
276 ret = clk->pll_calc(clk, pll, freq, &pv);
277 *N = pv.N1;
278 *M = pv.M1;
279 *P = pv.log2P;
280 return ret;
281}
282
283#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index e90468d5e5c0..51a2cb102b44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -71,14 +71,16 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
71 return ERR_PTR(ret); 71 return ERR_PTR(ret);
72 72
73 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART; 73 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
74 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); 74
75 if (!nvbo->gem) { 75 /* Initialize the embedded gem-object. We return a single gem-reference
76 * to the caller, instead of a normal nouveau_bo ttm reference. */
77 ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
78 if (ret) {
76 nouveau_bo_ref(NULL, &nvbo); 79 nouveau_bo_ref(NULL, &nvbo);
77 return ERR_PTR(-ENOMEM); 80 return ERR_PTR(-ENOMEM);
78 } 81 }
79 82
80 nvbo->gem->driver_private = nvbo; 83 return &nvbo->gem;
81 return nvbo->gem;
82} 84}
83 85
84int nouveau_gem_prime_pin(struct drm_gem_object *obj) 86int nouveau_gem_prime_pin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
new file mode 100644
index 000000000000..89201a17ce75
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "nouveau_sysfs.h"
26
27#include <core/object.h>
28#include <core/class.h>
29
30static inline struct drm_device *
31drm_device(struct device *d)
32{
33 return pci_get_drvdata(to_pci_dev(d));
34}
35
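/* Append printf-formatted text at cursor p, then advance p and shrink the
 * remaining-space counter r by the length just written.
 */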
36#define snappendf(p,r,f,a...) do { \
37 snprintf(p, r, f, ##a); \
38 r -= strlen(p); \
39 p += strlen(p); \
 40} while (0)
41
42static ssize_t
43nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b)
44{
45 struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
46 struct nv_control_pstate_info info;
47 size_t cnt = PAGE_SIZE;
48 char *buf = b;
49 int ret, i;
50
51 ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_INFO, &info, sizeof(info));
52 if (ret)
53 return ret;
54
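	/* one line per performance state, plus a trailing "--:" line for the
	 * current state (NV_CONTROL_PSTATE_ATTR_STATE_CURRENT)
	 */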
55 for (i = 0; i < info.count + 1; i++) {
56 const s32 state = i < info.count ? i :
57 NV_CONTROL_PSTATE_ATTR_STATE_CURRENT;
58 struct nv_control_pstate_attr attr = {
59 .state = state,
60 .index = 0,
61 };
62
63 ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR,
64 &attr, sizeof(attr));
65 if (ret)
66 return ret;
67
68 if (i < info.count)
69 snappendf(buf, cnt, "%02x:", attr.state);
70 else
71 snappendf(buf, cnt, "--:");
72
73 attr.index = 0;
74 do {
75 attr.state = state;
76 ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR,
77 &attr, sizeof(attr));
78 if (ret)
79 return ret;
80
81 snappendf(buf, cnt, " %s %d", attr.name, attr.min);
82 if (attr.min != attr.max)
83 snappendf(buf, cnt, "-%d", attr.max);
84 snappendf(buf, cnt, " %s", attr.unit);
85 } while (attr.index);
86
87 if ((state >= 0 && info.pstate == state) ||
88 (state < 0 && info.ustate < 0))
89 snappendf(buf, cnt, " *");
90 snappendf(buf, cnt, "\n");
91 }
92
93 return strlen(b);
94}
95
96static ssize_t
97nouveau_sysfs_pstate_set(struct device *d, struct device_attribute *a,
98 const char *buf, size_t count)
99{
100 struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
101 struct nv_control_pstate_user args;
102 long value, ret;
103 char *tmp;
104
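	/* accept "none" (NV_CONTROL_PSTATE_USER_STATE_UNKNOWN), "auto"
	 * (perfmon-driven), or a hexadecimal pstate index
	 */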
105 if ((tmp = strchr(buf, '\n')))
106 *tmp = '\0';
107
108 if (!strcasecmp(buf, "none"))
109 args.state = NV_CONTROL_PSTATE_USER_STATE_UNKNOWN;
110 else
111 if (!strcasecmp(buf, "auto"))
112 args.state = NV_CONTROL_PSTATE_USER_STATE_PERFMON;
113 else {
114 ret = kstrtol(buf, 16, &value);
115 if (ret)
116 return ret;
117 args.state = value;
118 }
119
120 ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_USER, &args, sizeof(args));
121 if (ret < 0)
122 return ret;
123
124 return count;
125}
126
127static DEVICE_ATTR(pstate, S_IRUGO | S_IWUSR,
128 nouveau_sysfs_pstate_get, nouveau_sysfs_pstate_set);
129
130void
131nouveau_sysfs_fini(struct drm_device *dev)
132{
133 struct nouveau_sysfs *sysfs = nouveau_sysfs(dev);
134 struct nouveau_drm *drm = nouveau_drm(dev);
135
136 if (sysfs->ctrl) {
137 device_remove_file(&dev->pdev->dev, &dev_attr_pstate);
138 nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL);
139 }
140
141 drm->sysfs = NULL;
142 kfree(sysfs);
143}
144
145int
146nouveau_sysfs_init(struct drm_device *dev)
147{
148 struct nouveau_drm *drm = nouveau_drm(dev);
149 struct nouveau_sysfs *sysfs;
150 int ret;
151
152 sysfs = drm->sysfs = kzalloc(sizeof(*sysfs), GFP_KERNEL);
153 if (!sysfs)
154 return -ENOMEM;
155
156 ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL,
157 NV_CONTROL_CLASS, NULL, 0, &sysfs->ctrl);
158 if (ret == 0)
159 device_create_file(&dev->pdev->dev, &dev_attr_pstate);
160
161 return 0;
162}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.h b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
new file mode 100644
index 000000000000..74b47f1e01ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
@@ -0,0 +1,19 @@
1#ifndef __NOUVEAU_SYSFS_H__
2#define __NOUVEAU_SYSFS_H__
3
4#include "nouveau_drm.h"
5
6struct nouveau_sysfs {
7 struct nouveau_object *ctrl;
8};
9
10static inline struct nouveau_sysfs *
11nouveau_sysfs(struct drm_device *dev)
12{
13 return nouveau_drm(dev)->sysfs;
14}
15
16int nouveau_sysfs_init(struct drm_device *);
17void nouveau_sysfs_fini(struct drm_device *);
18
19#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
deleted file mode 100644
index 9976414cbe50..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ /dev/null
@@ -1,250 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26
27#include "nouveau_drm.h"
28#include "nouveau_pm.h"
29
30#include <subdev/bios/gpio.h>
31#include <subdev/gpio.h>
32
33static const enum dcb_gpio_func_name vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
 34static const int nr_vidtag = ARRAY_SIZE(vidtag);
35
36int
37nouveau_voltage_gpio_get(struct drm_device *dev)
38{
39 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
40 struct nouveau_device *device = nouveau_dev(dev);
41 struct nouveau_gpio *gpio = nouveau_gpio(device);
42 u8 vid = 0;
43 int i;
44
45 for (i = 0; i < nr_vidtag; i++) {
46 if (!(volt->vid_mask & (1 << i)))
47 continue;
48
49 vid |= gpio->get(gpio, 0, vidtag[i], 0xff) << i;
50 }
51
52 return nouveau_volt_lvl_lookup(dev, vid);
53}
54
55int
56nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
57{
58 struct nouveau_device *device = nouveau_dev(dev);
59 struct nouveau_gpio *gpio = nouveau_gpio(device);
60 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
61 int vid, i;
62
63 vid = nouveau_volt_vid_lookup(dev, voltage);
64 if (vid < 0)
65 return vid;
66
67 for (i = 0; i < nr_vidtag; i++) {
68 if (!(volt->vid_mask & (1 << i)))
69 continue;
70
71 gpio->set(gpio, 0, vidtag[i], 0xff, !!(vid & (1 << i)));
72 }
73
74 return 0;
75}
76
77int
78nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
79{
80 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
81 int i;
82
83 for (i = 0; i < volt->nr_level; i++) {
84 if (volt->level[i].voltage == voltage)
85 return volt->level[i].vid;
86 }
87
88 return -ENOENT;
89}
90
91int
92nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
93{
94 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
95 int i;
96
97 for (i = 0; i < volt->nr_level; i++) {
98 if (volt->level[i].vid == vid)
99 return volt->level[i].voltage;
100 }
101
102 return -ENOENT;
103}
104
105void
106nouveau_volt_init(struct drm_device *dev)
107{
108 struct nouveau_drm *drm = nouveau_drm(dev);
109 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
110 struct nouveau_pm *pm = nouveau_pm(dev);
111 struct nouveau_pm_voltage *voltage = &pm->voltage;
112 struct nvbios *bios = &drm->vbios;
113 struct dcb_gpio_func func;
114 struct bit_entry P;
115 u8 *volt = NULL, *entry;
116 int i, headerlen, recordlen, entries, vidmask, vidshift;
117
118 if (bios->type == NVBIOS_BIT) {
119 if (bit_table(dev, 'P', &P))
120 return;
121
122 if (P.version == 1)
123 volt = ROMPTR(dev, P.data[16]);
124 else
125 if (P.version == 2)
126 volt = ROMPTR(dev, P.data[12]);
127 else {
128 NV_WARN(drm, "unknown volt for BIT P %d\n", P.version);
129 }
130 } else {
131 if (bios->data[bios->offset + 6] < 0x27) {
132 NV_DEBUG(drm, "BMP version too old for voltage\n");
133 return;
134 }
135
136 volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
137 }
138
139 if (!volt) {
140 NV_DEBUG(drm, "voltage table pointer invalid\n");
141 return;
142 }
143
144 switch (volt[0]) {
145 case 0x10:
146 case 0x11:
147 case 0x12:
148 headerlen = 5;
149 recordlen = volt[1];
150 entries = volt[2];
151 vidshift = 0;
152 vidmask = volt[4];
153 break;
154 case 0x20:
155 headerlen = volt[1];
156 recordlen = volt[3];
157 entries = volt[2];
158 vidshift = 0; /* could be vidshift like 0x30? */
159 vidmask = volt[5];
160 break;
161 case 0x30:
162 headerlen = volt[1];
163 recordlen = volt[2];
164 entries = volt[3];
165 vidmask = volt[4];
166 /* no longer certain what volt[5] is, if it's related to
167 * the vid shift then it's definitely not a function of
168 * how many bits are set.
169 *
170 * after looking at a number of nva3+ vbios images, they
 171	 * all seem likely to use a static shift of 2, so let's
 172	 * go with that for now until proven otherwise.
173 */
174 vidshift = 2;
175 break;
176 case 0x40:
177 headerlen = volt[1];
178 recordlen = volt[2];
179 entries = volt[3]; /* not a clue what the entries are for.. */
180 vidmask = volt[11]; /* guess.. */
181 vidshift = 0;
182 break;
183 default:
184 NV_WARN(drm, "voltage table 0x%02x unknown\n", volt[0]);
185 return;
186 }
187
188 /* validate vid mask */
189 voltage->vid_mask = vidmask;
190 if (!voltage->vid_mask)
191 return;
192
193 i = 0;
194 while (vidmask) {
 195		if (i >= nr_vidtag) {
196 NV_DEBUG(drm, "vid bit %d unknown\n", i);
197 return;
198 }
199
200 if (gpio && gpio->find(gpio, 0, vidtag[i], 0xff, &func)) {
201 NV_DEBUG(drm, "vid bit %d has no gpio tag\n", i);
202 return;
203 }
204
205 vidmask >>= 1;
206 i++;
207 }
208
209 /* parse vbios entries into common format */
210 voltage->version = volt[0];
211 if (voltage->version < 0x40) {
212 voltage->nr_level = entries;
213 voltage->level =
214 kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
215 if (!voltage->level)
216 return;
217
218 entry = volt + headerlen;
219 for (i = 0; i < entries; i++, entry += recordlen) {
220 voltage->level[i].voltage = entry[0] * 10000;
221 voltage->level[i].vid = entry[1] >> vidshift;
222 }
223 } else {
224 u32 volt_uv = ROM32(volt[4]);
225 s16 step_uv = ROM16(volt[8]);
226 u8 vid;
227
228 voltage->nr_level = voltage->vid_mask + 1;
229 voltage->level = kcalloc(voltage->nr_level,
230 sizeof(*voltage->level), GFP_KERNEL);
231 if (!voltage->level)
232 return;
233
234 for (vid = 0; vid <= voltage->vid_mask; vid++) {
235 voltage->level[vid].voltage = volt_uv;
236 voltage->level[vid].vid = vid;
237 volt_uv += step_uv;
238 }
239 }
240
241 voltage->supported = true;
242}
243
244void
245nouveau_volt_fini(struct drm_device *dev)
246{
247 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
248
249 kfree(volt->level);
250}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 77dcc9c50777..8fe32bbed99a 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -255,6 +255,12 @@ nv04_fbcon_accel_init(struct fb_info *info)
255 OUT_RING(chan, NvCtxSurf2D); 255 OUT_RING(chan, NvCtxSurf2D);
256 BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1); 256 BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
257 OUT_RING(chan, 3); 257 OUT_RING(chan, 3);
258 if (device->chipset >= 0x11 /*XXX: oclass == 0x009f*/) {
259 BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3);
260 OUT_RING(chan, 0);
261 OUT_RING(chan, 1);
262 OUT_RING(chan, 2);
263 }
258 264
259 BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1); 265 BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
260 OUT_RING(chan, NvGdiRect); 266 OUT_RING(chan, NvGdiRect);
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
deleted file mode 100644
index 27afc0ea28b0..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ /dev/null
@@ -1,146 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drm.h"
27#include "nouveau_reg.h"
28#include "dispnv04/hw.h"
29#include "nouveau_pm.h"
30
31#include <subdev/bios/pll.h>
32#include <subdev/clock.h>
33#include <subdev/timer.h>
34
35int
36nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
37{
38 int ret;
39
40 ret = nouveau_hw_get_clock(dev, PLL_CORE);
41 if (ret < 0)
42 return ret;
43 perflvl->core = ret;
44
45 ret = nouveau_hw_get_clock(dev, PLL_MEMORY);
46 if (ret < 0)
47 return ret;
48 perflvl->memory = ret;
49
50 return 0;
51}
52
53struct nv04_pm_clock {
54 struct nvbios_pll pll;
55 struct nouveau_pll_vals calc;
56};
57
58struct nv04_pm_state {
59 struct nv04_pm_clock core;
60 struct nv04_pm_clock memory;
61};
62
63static int
64calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
65{
66 struct nouveau_device *device = nouveau_dev(dev);
67 struct nouveau_bios *bios = nouveau_bios(device);
68 struct nouveau_clock *pclk = nouveau_clock(device);
69 int ret;
70
71 ret = nvbios_pll_parse(bios, id, &clk->pll);
72 if (ret)
73 return ret;
74
75 ret = pclk->pll_calc(pclk, &clk->pll, khz, &clk->calc);
76 if (!ret)
77 return -EINVAL;
78
79 return 0;
80}
81
82void *
83nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
84{
85 struct nv04_pm_state *info;
86 int ret;
87
88 info = kzalloc(sizeof(*info), GFP_KERNEL);
89 if (!info)
90 return ERR_PTR(-ENOMEM);
91
92 ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core);
93 if (ret)
94 goto error;
95
96 if (perflvl->memory) {
97 ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory);
98 if (ret)
99 goto error;
100 }
101
102 return info;
103error:
104 kfree(info);
105 return ERR_PTR(ret);
106}
107
108static void
109prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
110{
111 struct nouveau_device *device = nouveau_dev(dev);
112 struct nouveau_clock *pclk = nouveau_clock(device);
113 u32 reg = clk->pll.reg;
114
115 /* thank the insane nouveau_hw_setpll() interface for this */
116 if (device->card_type >= NV_40)
117 reg += 4;
118
119 pclk->pll_prog(pclk, reg, &clk->calc);
120}
121
122int
123nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
124{
125 struct nouveau_device *device = nouveau_dev(dev);
126 struct nouveau_timer *ptimer = nouveau_timer(device);
127 struct nv04_pm_state *state = pre_state;
128
129 prog_pll(dev, &state->core);
130
131 if (state->memory.pll.reg) {
132 prog_pll(dev, &state->memory);
133 if (device->card_type < NV_30) {
134 if (device->card_type == NV_20)
135 nv_mask(device, 0x1002c4, 0, 1 << 20);
136
137 /* Reset the DLLs */
138 nv_mask(device, 0x1002c0, 0, 1 << 8);
139 }
140 }
141
142 nv_ofuncs(ptimer)->init(nv_object(ptimer));
143
144 kfree(state);
145 return 0;
146}
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
deleted file mode 100644
index 625f80d53dc2..000000000000
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ /dev/null
@@ -1,353 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drm.h"
27#include "nouveau_bios.h"
28#include "nouveau_pm.h"
29#include "dispnv04/hw.h"
30
31#include <subdev/bios/pll.h>
32#include <subdev/clock.h>
33#include <subdev/timer.h>
34
35#include <engine/fifo.h>
36
37#define min2(a,b) ((a) < (b) ? (a) : (b))
38
39static u32
40read_pll_1(struct drm_device *dev, u32 reg)
41{
42 struct nouveau_device *device = nouveau_dev(dev);
43 u32 ctrl = nv_rd32(device, reg + 0x00);
44 int P = (ctrl & 0x00070000) >> 16;
45 int N = (ctrl & 0x0000ff00) >> 8;
46 int M = (ctrl & 0x000000ff) >> 0;
47 u32 ref = 27000, clk = 0;
48
49 if (ctrl & 0x80000000)
50 clk = ref * N / M;
51
52 return clk >> P;
53}
54
55static u32
56read_pll_2(struct drm_device *dev, u32 reg)
57{
58 struct nouveau_device *device = nouveau_dev(dev);
59 u32 ctrl = nv_rd32(device, reg + 0x00);
60 u32 coef = nv_rd32(device, reg + 0x04);
61 int N2 = (coef & 0xff000000) >> 24;
62 int M2 = (coef & 0x00ff0000) >> 16;
63 int N1 = (coef & 0x0000ff00) >> 8;
64 int M1 = (coef & 0x000000ff) >> 0;
65 int P = (ctrl & 0x00070000) >> 16;
66 u32 ref = 27000, clk = 0;
67
68 if ((ctrl & 0x80000000) && M1) {
69 clk = ref * N1 / M1;
70 if ((ctrl & 0x40000100) == 0x40000000) {
71 if (M2)
72 clk = clk * N2 / M2;
73 else
74 clk = 0;
75 }
76 }
77
78 return clk >> P;
79}
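
Both readback helpers above implement the same VCO arithmetic: the first stage multiplies the 27 MHz reference by N1/M1, the optional second stage scales the result by N2/M2, and the post-divider shifts right by P. A standalone sketch of that arithmetic with illustrative coefficients (not values read from real hardware):

#include <stdio.h>

int main(void)
{
	unsigned ref = 27000, N1 = 100, M1 = 6, N2 = 1, M2 = 1, P = 1;
	unsigned clk = ref * N1 / M1;	/* first stage: 450000 kHz */
	clk = clk * N2 / M2;		/* second stage: unchanged here */
	printf("%u kHz\n", clk >> P);	/* post-divider: 225000 kHz */
	return 0;
}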
80
81static u32
82read_clk(struct drm_device *dev, u32 src)
83{
84 switch (src) {
85 case 3:
86 return read_pll_2(dev, 0x004000);
87 case 2:
88 return read_pll_1(dev, 0x004008);
89 default:
90 break;
91 }
92
93 return 0;
94}
95
96int
97nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
98{
99 struct nouveau_device *device = nouveau_dev(dev);
100 u32 ctrl = nv_rd32(device, 0x00c040);
101
102 perflvl->core = read_clk(dev, (ctrl & 0x00000003) >> 0);
103 perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4);
104 perflvl->memory = read_pll_2(dev, 0x4020);
105 return 0;
106}
107
108struct nv40_pm_state {
109 u32 ctrl;
110 u32 npll_ctrl;
111 u32 npll_coef;
112 u32 spll;
113 u32 mpll_ctrl;
114 u32 mpll_coef;
115};
116
117static int
118nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
119 u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P)
120{
121 struct nouveau_device *device = nouveau_dev(dev);
122 struct nouveau_bios *bios = nouveau_bios(device);
123 struct nouveau_clock *pclk = nouveau_clock(device);
124 struct nouveau_pll_vals coef;
125 int ret;
126
127 ret = nvbios_pll_parse(bios, reg, pll);
128 if (ret)
129 return ret;
130
131 if (clk < pll->vco1.max_freq)
132 pll->vco2.max_freq = 0;
133
134 ret = pclk->pll_calc(pclk, pll, clk, &coef);
135 if (ret == 0)
136 return -ERANGE;
137
138 *N1 = coef.N1;
139 *M1 = coef.M1;
140 if (N2 && M2) {
141 if (pll->vco2.max_freq) {
142 *N2 = coef.N2;
143 *M2 = coef.M2;
144 } else {
145 *N2 = 1;
146 *M2 = 1;
147 }
148 }
149 *log2P = coef.log2P;
150 return 0;
151}
152
153void *
154nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
155{
156 struct nv40_pm_state *info;
157 struct nvbios_pll pll;
158 int N1, N2, M1, M2, log2P;
159 int ret;
160
161 info = kmalloc(sizeof(*info), GFP_KERNEL);
162 if (!info)
163 return ERR_PTR(-ENOMEM);
164
165 /* core/geometric clock */
166 ret = nv40_calc_pll(dev, 0x004000, &pll, perflvl->core,
167 &N1, &M1, &N2, &M2, &log2P);
168 if (ret < 0)
169 goto out;
170
171 if (N2 == M2) {
172 info->npll_ctrl = 0x80000100 | (log2P << 16);
173 info->npll_coef = (N1 << 8) | M1;
174 } else {
175 info->npll_ctrl = 0xc0000000 | (log2P << 16);
176 info->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
177 }
178
179 /* use the second PLL for shader/rop clock, if it differs from core */
180 if (perflvl->shader && perflvl->shader != perflvl->core) {
181 ret = nv40_calc_pll(dev, 0x004008, &pll, perflvl->shader,
182 &N1, &M1, NULL, NULL, &log2P);
183 if (ret < 0)
184 goto out;
185
186 info->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
187 info->ctrl = 0x00000223;
188 } else {
189 info->spll = 0x00000000;
190 info->ctrl = 0x00000333;
191 }
192
193 /* memory clock */
194 if (!perflvl->memory) {
195 info->mpll_ctrl = 0x00000000;
196 goto out;
197 }
198
199 ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
200 &N1, &M1, &N2, &M2, &log2P);
201 if (ret < 0)
202 goto out;
203
204 info->mpll_ctrl = 0x80000000 | (log2P << 16);
205 info->mpll_ctrl |= min2(pll.bias_p + log2P, pll.max_p) << 20;
206 if (N2 == M2) {
207 info->mpll_ctrl |= 0x00000100;
208 info->mpll_coef = (N1 << 8) | M1;
209 } else {
210 info->mpll_ctrl |= 0x40000000;
211 info->mpll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
212 }
213
214out:
215 if (ret < 0) {
216 kfree(info);
217 info = ERR_PTR(ret);
218 }
219 return info;
220}
221
222static bool
223nv40_pm_gr_idle(void *data)
224{
225 struct drm_device *dev = data;
226 struct nouveau_device *device = nouveau_dev(dev);
227
228 if ((nv_rd32(device, 0x400760) & 0x000000f0) >> 4 !=
229 (nv_rd32(device, 0x400760) & 0x0000000f))
230 return false;
231
232 if (nv_rd32(device, 0x400700))
233 return false;
234
235 return true;
236}
237
238int
239nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
240{
241 struct nouveau_device *device = nouveau_dev(dev);
242 struct nouveau_fifo *pfifo = nouveau_fifo(device);
243 struct nouveau_drm *drm = nouveau_drm(dev);
244 struct nv40_pm_state *info = pre_state;
245 unsigned long flags;
246 struct bit_entry M;
247 u32 crtc_mask = 0;
248 u8 sr1[2];
249 int i, ret = -EAGAIN;
250
251 /* determine which CRTCs are active, fetch VGA_SR1 for each */
252 for (i = 0; i < 2; i++) {
253 u32 vbl = nv_rd32(device, 0x600808 + (i * 0x2000));
254 u32 cnt = 0;
255 do {
256 if (vbl != nv_rd32(device, 0x600808 + (i * 0x2000))) {
257 nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
258 sr1[i] = nv_rd08(device, 0x0c03c5 + (i * 0x2000));
259 if (!(sr1[i] & 0x20))
260 crtc_mask |= (1 << i);
261 break;
262 }
263 udelay(1);
264 } while (cnt++ < 32);
265 }
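	/* note: the loop above detects active heads indirectly: if the
	 * counter at 0x600808 changes within ~32us, the CRTC is scanning
	 * out; VGA SR01 is sampled so that bit 5 (screen off) can be set
	 * during the reclock and restored afterwards.
	 */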
266
267 /* halt and idle engines */
268 pfifo->pause(pfifo, &flags);
269
270 if (!nv_wait_cb(device, nv40_pm_gr_idle, dev))
271 goto resume;
272
273 ret = 0;
274
275 /* set engine clocks */
276 nv_mask(device, 0x00c040, 0x00000333, 0x00000000);
277 nv_wr32(device, 0x004004, info->npll_coef);
278 nv_mask(device, 0x004000, 0xc0070100, info->npll_ctrl);
279 nv_mask(device, 0x004008, 0xc007ffff, info->spll);
280 mdelay(5);
281 nv_mask(device, 0x00c040, 0x00000333, info->ctrl);
282
283 if (!info->mpll_ctrl)
284 goto resume;
285
286 /* wait for vblank start on active crtcs, disable memory access */
287 for (i = 0; i < 2; i++) {
288 if (!(crtc_mask & (1 << i)))
289 continue;
290 nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
291 nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
292 nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
293 nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
294 }
295
296 /* prepare ram for reclocking */
297 nv_wr32(device, 0x1002d4, 0x00000001); /* precharge */
298 nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
299 nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
300 nv_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
301 nv_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */
302
303 /* change the PLL of each memory partition */
304 nv_mask(device, 0x00c040, 0x0000c000, 0x00000000);
305 switch (nv_device(drm->device)->chipset) {
306 case 0x40:
307 case 0x45:
308 case 0x41:
309 case 0x42:
310 case 0x47:
311 nv_mask(device, 0x004044, 0xc0771100, info->mpll_ctrl);
312 nv_mask(device, 0x00402c, 0xc0771100, info->mpll_ctrl);
313 nv_wr32(device, 0x004048, info->mpll_coef);
314 nv_wr32(device, 0x004030, info->mpll_coef);
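		/* fall through: also program the 0x4038/0x403c PLL copy */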
315 case 0x43:
316 case 0x49:
317 case 0x4b:
318 nv_mask(device, 0x004038, 0xc0771100, info->mpll_ctrl);
319 nv_wr32(device, 0x00403c, info->mpll_coef);
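		/* fall through: all chipsets program the 0x4020/0x4024 copy */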
320 default:
321 nv_mask(device, 0x004020, 0xc0771100, info->mpll_ctrl);
322 nv_wr32(device, 0x004024, info->mpll_coef);
323 break;
324 }
325 udelay(100);
326 nv_mask(device, 0x00c040, 0x0000c000, 0x0000c000);
327
328 /* re-enable normal operation of memory controller */
329 nv_wr32(device, 0x1002dc, 0x00000000);
330 nv_mask(device, 0x100210, 0x80000000, 0x80000000);
331 udelay(100);
332
333 /* execute memory reset script from vbios */
334 if (!bit_table(dev, 'M', &M))
335 nouveau_bios_run_init_table(dev, ROM16(M.data[0]), NULL, 0);
336
337 /* make sure we're in vblank (hopefully the same one as before), and
338 * then re-enable crtc memory access
339 */
340 for (i = 0; i < 2; i++) {
341 if (!(crtc_mask & (1 << i)))
342 continue;
343 nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
344 nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
345 nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
346 }
347
348 /* resume engines */
349resume:
350 pfifo->start(pfifo, &flags);
351 kfree(info);
352 return ret;
353}
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
deleted file mode 100644
index 4efc33fa73fc..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ /dev/null
@@ -1,855 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drm.h"
27#include "nouveau_bios.h"
28#include "dispnv04/hw.h"
29#include "nouveau_pm.h"
30#include "nouveau_hwsq.h"
31
32#include "nv50_display.h"
33
34#include <subdev/bios/pll.h>
35#include <subdev/clock.h>
36#include <subdev/timer.h>
37#include <subdev/fb.h>
38
39enum clk_src {
40 clk_src_crystal,
41 clk_src_href,
42 clk_src_hclk,
43 clk_src_hclkm3,
44 clk_src_hclkm3d2,
45 clk_src_host,
46 clk_src_nvclk,
47 clk_src_sclk,
48 clk_src_mclk,
49 clk_src_vdec,
50 clk_src_dom6
51};
52
53static u32 read_clk(struct drm_device *, enum clk_src);
54
55static u32
56read_div(struct drm_device *dev)
57{
58 struct nouveau_device *device = nouveau_dev(dev);
59 struct nouveau_drm *drm = nouveau_drm(dev);
60
61 switch (nv_device(drm->device)->chipset) {
 62 	case 0x50: /* the register exists, but only has bit 31, not the dividers */
63 case 0x84:
64 case 0x86:
65 case 0x98:
66 case 0xa0:
67 return nv_rd32(device, 0x004700);
68 case 0x92:
69 case 0x94:
70 case 0x96:
71 return nv_rd32(device, 0x004800);
72 default:
73 return 0x00000000;
74 }
75}
76
77static u32
78read_pll_src(struct drm_device *dev, u32 base)
79{
80 struct nouveau_device *device = nouveau_dev(dev);
81 struct nouveau_drm *drm = nouveau_drm(dev);
82 u32 coef, ref = read_clk(dev, clk_src_crystal);
83 u32 rsel = nv_rd32(device, 0x00e18c);
84 int P, N, M, id;
85
86 switch (nv_device(drm->device)->chipset) {
87 case 0x50:
88 case 0xa0:
89 switch (base) {
90 case 0x4020:
91 case 0x4028: id = !!(rsel & 0x00000004); break;
92 case 0x4008: id = !!(rsel & 0x00000008); break;
93 case 0x4030: id = 0; break;
94 default:
95 NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
96 return 0;
97 }
98
99 coef = nv_rd32(device, 0x00e81c + (id * 0x0c));
100 ref *= (coef & 0x01000000) ? 2 : 4;
101 P = (coef & 0x00070000) >> 16;
102 N = ((coef & 0x0000ff00) >> 8) + 1;
103 M = ((coef & 0x000000ff) >> 0) + 1;
104 break;
105 case 0x84:
106 case 0x86:
107 case 0x92:
108 coef = nv_rd32(device, 0x00e81c);
109 P = (coef & 0x00070000) >> 16;
110 N = (coef & 0x0000ff00) >> 8;
111 M = (coef & 0x000000ff) >> 0;
112 break;
113 case 0x94:
114 case 0x96:
115 case 0x98:
116 rsel = nv_rd32(device, 0x00c050);
117 switch (base) {
118 case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
119 case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
120 case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
121 case 0x4030: rsel = 3; break;
122 default:
123 NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
124 return 0;
125 }
126
127 switch (rsel) {
128 case 0: id = 1; break;
129 case 1: return read_clk(dev, clk_src_crystal);
130 case 2: return read_clk(dev, clk_src_href);
131 case 3: id = 0; break;
132 }
133
134 coef = nv_rd32(device, 0x00e81c + (id * 0x28));
135 P = (nv_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
136 P += (coef & 0x00070000) >> 16;
137 N = (coef & 0x0000ff00) >> 8;
138 M = (coef & 0x000000ff) >> 0;
139 break;
140 default:
141 BUG_ON(1);
142 }
143
144 if (M)
145 return (ref * N / M) >> P;
146 return 0;
147}
148
149static u32
150read_pll_ref(struct drm_device *dev, u32 base)
151{
152 struct nouveau_device *device = nouveau_dev(dev);
153 struct nouveau_drm *drm = nouveau_drm(dev);
154 u32 src, mast = nv_rd32(device, 0x00c040);
155
156 switch (base) {
157 case 0x004028:
158 src = !!(mast & 0x00200000);
159 break;
160 case 0x004020:
161 src = !!(mast & 0x00400000);
162 break;
163 case 0x004008:
164 src = !!(mast & 0x00010000);
165 break;
166 case 0x004030:
167 src = !!(mast & 0x02000000);
168 break;
169 case 0x00e810:
170 return read_clk(dev, clk_src_crystal);
171 default:
172 NV_ERROR(drm, "bad pll 0x%06x\n", base);
173 return 0;
174 }
175
176 if (src)
177 return read_clk(dev, clk_src_href);
178 return read_pll_src(dev, base);
179}
180
181static u32
182read_pll(struct drm_device *dev, u32 base)
183{
184 struct nouveau_device *device = nouveau_dev(dev);
185 struct nouveau_drm *drm = nouveau_drm(dev);
186 u32 mast = nv_rd32(device, 0x00c040);
187 u32 ctrl = nv_rd32(device, base + 0);
188 u32 coef = nv_rd32(device, base + 4);
189 u32 ref = read_pll_ref(dev, base);
190 u32 clk = 0;
191 int N1, N2, M1, M2;
192
193 if (base == 0x004028 && (mast & 0x00100000)) {
194 		/* oddly, this appears to only disable the post-divider on nva0 */
195 if (nv_device(drm->device)->chipset != 0xa0)
196 return read_clk(dev, clk_src_dom6);
197 }
198
199 N2 = (coef & 0xff000000) >> 24;
200 M2 = (coef & 0x00ff0000) >> 16;
201 N1 = (coef & 0x0000ff00) >> 8;
202 M1 = (coef & 0x000000ff);
203 if ((ctrl & 0x80000000) && M1) {
204 clk = ref * N1 / M1;
205 if ((ctrl & 0x40000100) == 0x40000000) {
206 if (M2)
207 clk = clk * N2 / M2;
208 else
209 clk = 0;
210 }
211 }
212
213 return clk;
214}
215
216static u32
217read_clk(struct drm_device *dev, enum clk_src src)
218{
219 struct nouveau_device *device = nouveau_dev(dev);
220 struct nouveau_drm *drm = nouveau_drm(dev);
221 u32 mast = nv_rd32(device, 0x00c040);
222 u32 P = 0;
223
224 switch (src) {
225 case clk_src_crystal:
226 return device->crystal;
227 case clk_src_href:
228 return 100000; /* PCIE reference clock */
229 case clk_src_hclk:
230 return read_clk(dev, clk_src_href) * 27778 / 10000;
231 case clk_src_hclkm3:
232 return read_clk(dev, clk_src_hclk) * 3;
233 case clk_src_hclkm3d2:
234 return read_clk(dev, clk_src_hclk) * 3 / 2;
235 case clk_src_host:
236 switch (mast & 0x30000000) {
237 case 0x00000000: return read_clk(dev, clk_src_href);
238 case 0x10000000: break;
239 case 0x20000000: /* !0x50 */
240 case 0x30000000: return read_clk(dev, clk_src_hclk);
241 }
242 break;
243 case clk_src_nvclk:
244 if (!(mast & 0x00100000))
245 P = (nv_rd32(device, 0x004028) & 0x00070000) >> 16;
246 switch (mast & 0x00000003) {
247 case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
248 case 0x00000001: return read_clk(dev, clk_src_dom6);
249 case 0x00000002: return read_pll(dev, 0x004020) >> P;
250 case 0x00000003: return read_pll(dev, 0x004028) >> P;
251 }
252 break;
253 case clk_src_sclk:
254 P = (nv_rd32(device, 0x004020) & 0x00070000) >> 16;
255 switch (mast & 0x00000030) {
256 case 0x00000000:
257 if (mast & 0x00000080)
258 return read_clk(dev, clk_src_host) >> P;
259 return read_clk(dev, clk_src_crystal) >> P;
260 case 0x00000010: break;
261 case 0x00000020: return read_pll(dev, 0x004028) >> P;
262 case 0x00000030: return read_pll(dev, 0x004020) >> P;
263 }
264 break;
265 case clk_src_mclk:
266 P = (nv_rd32(device, 0x004008) & 0x00070000) >> 16;
267 if (nv_rd32(device, 0x004008) & 0x00000200) {
268 switch (mast & 0x0000c000) {
269 case 0x00000000:
270 return read_clk(dev, clk_src_crystal) >> P;
271 case 0x00008000:
272 case 0x0000c000:
273 return read_clk(dev, clk_src_href) >> P;
274 }
275 } else {
276 return read_pll(dev, 0x004008) >> P;
277 }
278 break;
279 case clk_src_vdec:
280 P = (read_div(dev) & 0x00000700) >> 8;
281 switch (nv_device(drm->device)->chipset) {
282 case 0x84:
283 case 0x86:
284 case 0x92:
285 case 0x94:
286 case 0x96:
287 case 0xa0:
288 switch (mast & 0x00000c00) {
289 case 0x00000000:
290 if (nv_device(drm->device)->chipset == 0xa0) /* wtf?? */
291 return read_clk(dev, clk_src_nvclk) >> P;
292 return read_clk(dev, clk_src_crystal) >> P;
293 case 0x00000400:
294 return 0;
295 case 0x00000800:
296 if (mast & 0x01000000)
297 return read_pll(dev, 0x004028) >> P;
298 return read_pll(dev, 0x004030) >> P;
299 case 0x00000c00:
300 return read_clk(dev, clk_src_nvclk) >> P;
301 }
302 break;
303 case 0x98:
304 switch (mast & 0x00000c00) {
305 case 0x00000000:
306 return read_clk(dev, clk_src_nvclk) >> P;
307 case 0x00000400:
308 return 0;
309 case 0x00000800:
310 return read_clk(dev, clk_src_hclkm3d2) >> P;
311 case 0x00000c00:
312 return read_clk(dev, clk_src_mclk) >> P;
313 }
314 break;
315 }
316 break;
317 case clk_src_dom6:
318 switch (nv_device(drm->device)->chipset) {
319 case 0x50:
320 case 0xa0:
321 return read_pll(dev, 0x00e810) >> 2;
322 case 0x84:
323 case 0x86:
324 case 0x92:
325 case 0x94:
326 case 0x96:
327 case 0x98:
328 P = (read_div(dev) & 0x00000007) >> 0;
329 switch (mast & 0x0c000000) {
330 case 0x00000000: return read_clk(dev, clk_src_href);
331 case 0x04000000: break;
332 case 0x08000000: return read_clk(dev, clk_src_hclk);
333 case 0x0c000000:
334 return read_clk(dev, clk_src_hclkm3) >> P;
335 }
336 break;
337 default:
338 break;
339 }
340 default:
341 break;
342 }
343
344 NV_DEBUG(drm, "unknown clock source %d 0x%08x\n", src, mast);
345 return 0;
346}
347
348int
349nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
350{
351 struct nouveau_drm *drm = nouveau_drm(dev);
352 if (nv_device(drm->device)->chipset == 0xaa ||
353 nv_device(drm->device)->chipset == 0xac)
354 return 0;
355
356 perflvl->core = read_clk(dev, clk_src_nvclk);
357 perflvl->shader = read_clk(dev, clk_src_sclk);
358 perflvl->memory = read_clk(dev, clk_src_mclk);
359 if (nv_device(drm->device)->chipset != 0x50) {
360 perflvl->vdec = read_clk(dev, clk_src_vdec);
361 perflvl->dom6 = read_clk(dev, clk_src_dom6);
362 }
363
364 return 0;
365}
366
367struct nv50_pm_state {
368 struct nouveau_pm_level *perflvl;
369 struct hwsq_ucode eclk_hwsq;
370 struct hwsq_ucode mclk_hwsq;
371 u32 mscript;
372 u32 mmast;
373 u32 mctrl;
374 u32 mcoef;
375};
376
377static u32
378calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
379 u32 clk, int *N1, int *M1, int *log2P)
380{
381 struct nouveau_device *device = nouveau_dev(dev);
382 struct nouveau_bios *bios = nouveau_bios(device);
383 struct nouveau_clock *pclk = nouveau_clock(device);
384 struct nouveau_pll_vals coef;
385 int ret;
386
387 ret = nvbios_pll_parse(bios, reg, pll);
388 if (ret)
389 return 0;
390
391 pll->vco2.max_freq = 0;
392 pll->refclk = read_pll_ref(dev, reg);
393 if (!pll->refclk)
394 return 0;
395
396 ret = pclk->pll_calc(pclk, pll, clk, &coef);
397 if (ret == 0)
398 return 0;
399
400 *N1 = coef.N1;
401 *M1 = coef.M1;
402 *log2P = coef.log2P;
403 return ret;
404}
405
406static inline u32
407calc_div(u32 src, u32 target, int *div)
408{
409 u32 clk0 = src, clk1 = src;
410 for (*div = 0; *div <= 7; (*div)++) {
411 if (clk0 <= target) {
412 clk1 = clk0 << (*div ? 1 : 0);
413 break;
414 }
415 clk0 >>= 1;
416 }
417
418 if (target - clk0 <= clk1 - target)
419 return clk0;
420 (*div)--;
421 return clk1;
422}
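
A self-contained illustration of the divider selection above, useful when reasoning about the engine-reclock paths below; the 1 GHz source and 300 MHz target are arbitrary example values:

#include <stdio.h>

static unsigned calc_div(unsigned src, unsigned target, int *div)
{
	unsigned clk0 = src, clk1 = src;
	for (*div = 0; *div <= 7; (*div)++) {
		if (clk0 <= target) {
			clk1 = clk0 << (*div ? 1 : 0);
			break;
		}
		clk0 >>= 1;
	}
	if (target - clk0 <= clk1 - target)
		return clk0;
	(*div)--;
	return clk1;
}

int main(void)
{
	int div;
	/* 1000000 -> 500000 -> 250000 kHz; 250000 is closer to the
	 * 300000 kHz target than 500000, so div = 2 is chosen */
	printf("%u kHz (div=%d)\n", calc_div(1000000, 300000, &div), div);
	return 0;
}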
423
424static inline u32
425clk_same(u32 a, u32 b)
426{
427 return ((a / 1000) == (b / 1000));
428}
429
430static void
431mclk_precharge(struct nouveau_mem_exec_func *exec)
432{
433 struct nv50_pm_state *info = exec->priv;
434 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
435
436 hwsq_wr32(hwsq, 0x1002d4, 0x00000001);
437}
438
439static void
440mclk_refresh(struct nouveau_mem_exec_func *exec)
441{
442 struct nv50_pm_state *info = exec->priv;
443 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
444
445 hwsq_wr32(hwsq, 0x1002d0, 0x00000001);
446}
447
448static void
449mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
450{
451 struct nv50_pm_state *info = exec->priv;
452 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
453
454 hwsq_wr32(hwsq, 0x100210, enable ? 0x80000000 : 0x00000000);
455}
456
457static void
458mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
459{
460 struct nv50_pm_state *info = exec->priv;
461 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
462
463 hwsq_wr32(hwsq, 0x1002dc, enable ? 0x00000001 : 0x00000000);
464}
465
466static void
467mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
468{
469 struct nv50_pm_state *info = exec->priv;
470 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
471
472 if (nsec > 1000)
473 hwsq_usec(hwsq, (nsec + 500) / 1000);
474}
475
476static u32
477mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
478{
479 struct nouveau_device *device = nouveau_dev(exec->dev);
480 if (mr <= 1)
481 return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
482 if (mr <= 3)
483 return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
484 return 0;
485}
486
487static void
488mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
489{
490 struct nouveau_device *device = nouveau_dev(exec->dev);
491 struct nouveau_fb *pfb = nouveau_fb(device);
492 struct nv50_pm_state *info = exec->priv;
493 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
494
495 if (mr <= 1) {
496 if (pfb->ram->ranks > 1)
497 hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data);
498 hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data);
499 } else
500 if (mr <= 3) {
501 if (pfb->ram->ranks > 1)
502 hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data);
503 hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data);
504 }
505}
506
507static void
508mclk_clock_set(struct nouveau_mem_exec_func *exec)
509{
510 struct nouveau_device *device = nouveau_dev(exec->dev);
511 struct nv50_pm_state *info = exec->priv;
512 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
513 u32 ctrl = nv_rd32(device, 0x004008);
514
515 info->mmast = nv_rd32(device, 0x00c040);
516 info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */
517 info->mmast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
518
519 hwsq_wr32(hwsq, 0xc040, info->mmast);
520 hwsq_wr32(hwsq, 0x4008, ctrl | 0x00000200); /* bypass MPLL */
521 if (info->mctrl & 0x80000000)
522 hwsq_wr32(hwsq, 0x400c, info->mcoef);
523 hwsq_wr32(hwsq, 0x4008, info->mctrl);
524}
525
526static void
527mclk_timing_set(struct nouveau_mem_exec_func *exec)
528{
529 struct nouveau_device *device = nouveau_dev(exec->dev);
530 struct nv50_pm_state *info = exec->priv;
531 struct nouveau_pm_level *perflvl = info->perflvl;
532 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
533 int i;
534
535 for (i = 0; i < 9; i++) {
536 u32 reg = 0x100220 + (i * 4);
537 u32 val = nv_rd32(device, reg);
538 if (val != perflvl->timing.reg[i])
539 hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]);
540 }
541}
542
543static int
544calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
545 struct nv50_pm_state *info)
546{
547 struct nouveau_drm *drm = nouveau_drm(dev);
548 struct nouveau_device *device = nouveau_dev(dev);
549 u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
550 struct nouveau_mem_exec_func exec = {
551 .dev = dev,
552 .precharge = mclk_precharge,
553 .refresh = mclk_refresh,
554 .refresh_auto = mclk_refresh_auto,
555 .refresh_self = mclk_refresh_self,
556 .wait = mclk_wait,
557 .mrg = mclk_mrg,
558 .mrs = mclk_mrs,
559 .clock_set = mclk_clock_set,
560 .timing_set = mclk_timing_set,
561 .priv = info
562 };
563 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
564 struct nvbios_pll pll;
565 int N, M, P;
566 int ret;
567
568 /* use pcie refclock if possible, otherwise use mpll */
569 info->mctrl = nv_rd32(device, 0x004008);
570 info->mctrl &= ~0x81ff0200;
571 	if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) {
572 		/* parse the PLL limits first so pll.bias_p below is valid */
		ret = nvbios_pll_parse(nouveau_bios(device), 0x4008, &pll);
		if (ret)
			return ret;
		info->mctrl |= 0x00000200 | (pll.bias_p << 19);
573 } else {
574 ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P);
575 if (ret == 0)
576 return -EINVAL;
577
578 info->mctrl |= 0x80000000 | (P << 22) | (P << 16);
579 info->mctrl |= pll.bias_p << 19;
580 info->mcoef = (N << 8) | M;
581 }
582
583 /* build the ucode which will reclock the memory for us */
584 hwsq_init(hwsq);
585 if (crtc_mask) {
586 hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
587 hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
588 }
589 if (nv_device(drm->device)->chipset >= 0x92)
590 hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
591 hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
592 	hwsq_op5f(hwsq, 0x00, 0x01); /* purpose unknown */
593
594 ret = nouveau_mem_exec(&exec, perflvl);
595 if (ret)
596 return ret;
597
598 hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
599 	hwsq_op5f(hwsq, 0x00, 0x00); /* purpose unknown; reverse of the 0x00, 0x01 op? */
600 if (nv_device(drm->device)->chipset >= 0x92)
601 hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
602 hwsq_fini(hwsq);
603 return 0;
604}
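
For orientation, the ucode assembled above always has the same overall shape; a sketch of the emitted sequence (the middle section is produced by the nouveau_mem_exec() callbacks, so this is illustrative only, not a usable script):

	hwsq_init(hwsq);
	hwsq_op5f(hwsq, crtc_mask, 0x00);	/* wait for scanout */
	hwsq_op5f(hwsq, crtc_mask, 0x01);	/* wait for vblank */
	hwsq_wr32(hwsq, 0x611200, 0x00003300);	/* >=nv92: disable scanout */
	hwsq_setf(hwsq, 0x10, 0);		/* disable bus access */
	/* precharge, refresh, self-refresh entry/exit, MR writes and the
	 * clock/timing register writes are emitted here via the mclk_*
	 * callbacks above */
	hwsq_setf(hwsq, 0x10, 1);		/* re-enable bus access */
	hwsq_wr32(hwsq, 0x611200, 0x00003330);	/* >=nv92: enable scanout */
	hwsq_fini(hwsq);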
605
606void *
607nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
608{
609 struct nouveau_device *device = nouveau_dev(dev);
610 struct nouveau_drm *drm = nouveau_drm(dev);
611 struct nv50_pm_state *info;
612 struct hwsq_ucode *hwsq;
613 struct nvbios_pll pll;
614 u32 out, mast, divs, ctrl;
615 int clk, ret = -EINVAL;
616 int N, M, P1, P2;
617
618 if (nv_device(drm->device)->chipset == 0xaa ||
619 nv_device(drm->device)->chipset == 0xac)
620 return ERR_PTR(-ENODEV);
621
622 info = kmalloc(sizeof(*info), GFP_KERNEL);
623 if (!info)
624 return ERR_PTR(-ENOMEM);
625 info->perflvl = perflvl;
626
627 /* memory: build hwsq ucode which we'll use to reclock memory.
628 * use pcie refclock if possible, otherwise use mpll */
629 info->mclk_hwsq.len = 0;
630 if (perflvl->memory) {
631 ret = calc_mclk(dev, perflvl, info);
632 if (ret)
633 goto error;
634 info->mscript = perflvl->memscript;
635 }
636
637 divs = read_div(dev);
638 mast = info->mmast;
639
640 /* start building HWSQ script for engine reclocking */
641 hwsq = &info->eclk_hwsq;
642 hwsq_init(hwsq);
643 hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
644 hwsq_op5f(hwsq, 0x00, 0x01); /* wait for access disabled? */
645
646 /* vdec/dom6: switch to "safe" clocks temporarily */
647 if (perflvl->vdec) {
648 mast &= ~0x00000c00;
649 divs &= ~0x00000700;
650 }
651
652 if (perflvl->dom6) {
653 mast &= ~0x0c000000;
654 divs &= ~0x00000007;
655 }
656
657 hwsq_wr32(hwsq, 0x00c040, mast);
658
659 /* vdec: avoid modifying xpll until we know exactly how the other
660 	 * clock domains work; I suspect at least some of them can also be
661 * tied to xpll...
662 */
663 if (perflvl->vdec) {
664 /* see how close we can get using nvclk as a source */
665 clk = calc_div(perflvl->core, perflvl->vdec, &P1);
666
667 /* see how close we can get using xpll/hclk as a source */
668 if (nv_device(drm->device)->chipset != 0x98)
669 out = read_pll(dev, 0x004030);
670 else
671 out = read_clk(dev, clk_src_hclkm3d2);
672 out = calc_div(out, perflvl->vdec, &P2);
673
674 /* select whichever gets us closest */
675 if (abs((int)perflvl->vdec - clk) <=
676 abs((int)perflvl->vdec - out)) {
677 if (nv_device(drm->device)->chipset != 0x98)
678 mast |= 0x00000c00;
679 divs |= P1 << 8;
680 } else {
681 mast |= 0x00000800;
682 divs |= P2 << 8;
683 }
684 }
685
686 	/* dom6: unclear what this clock feeds, but we're limited to various
687 	 * combinations of the host clock frequency
688 */
689 if (perflvl->dom6) {
690 if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
691 mast |= 0x00000000;
692 } else
693 if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
694 mast |= 0x08000000;
695 } else {
696 clk = read_clk(dev, clk_src_hclk) * 3;
697 clk = calc_div(clk, perflvl->dom6, &P1);
698
699 mast |= 0x0c000000;
700 divs |= P1;
701 }
702 }
703
704 /* vdec/dom6: complete switch to new clocks */
705 switch (nv_device(drm->device)->chipset) {
706 case 0x92:
707 case 0x94:
708 case 0x96:
709 hwsq_wr32(hwsq, 0x004800, divs);
710 break;
711 default:
712 hwsq_wr32(hwsq, 0x004700, divs);
713 break;
714 }
715
716 hwsq_wr32(hwsq, 0x00c040, mast);
717
718 /* core/shader: make sure sclk/nvclk are disconnected from their
719 * PLLs (nvclk to dom6, sclk to hclk)
720 */
721 if (nv_device(drm->device)->chipset < 0x92)
722 mast = (mast & ~0x001000b0) | 0x00100080;
723 else
724 mast = (mast & ~0x000000b3) | 0x00000081;
725
726 hwsq_wr32(hwsq, 0x00c040, mast);
727
728 /* core: for the moment at least, always use nvpll */
729 clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
730 if (clk == 0)
731 goto error;
732
733 ctrl = nv_rd32(device, 0x004028) & ~0xc03f0100;
734 mast &= ~0x00100000;
735 mast |= 3;
736
737 hwsq_wr32(hwsq, 0x004028, 0x80000000 | (P1 << 19) | (P1 << 16) | ctrl);
738 hwsq_wr32(hwsq, 0x00402c, (N << 8) | M);
739
740 /* shader: tie to nvclk if possible, otherwise use spll. have to be
741 * very careful that the shader clock is at least twice the core, or
742 	 * some chipsets will be very unhappy. I expect most or all of these
743 	 * cases will be handled by tying to nvclk, but there may be corner
744 	 * cases
745 */
746 ctrl = nv_rd32(device, 0x004020) & ~0xc03f0100;
747
748 if (P1-- && perflvl->shader == (perflvl->core << 1)) {
749 hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
750 hwsq_wr32(hwsq, 0x00c040, 0x00000020 | mast);
751 } else {
752 clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
753 if (clk == 0)
754 goto error;
755 ctrl |= 0x80000000;
756
757 hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
758 hwsq_wr32(hwsq, 0x004024, (N << 8) | M);
759 hwsq_wr32(hwsq, 0x00c040, 0x00000030 | mast);
760 }
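	/* when the requested shader clock is exactly twice the core clock,
	 * the nvpll VCO can feed both: core = VCO >> P1, so programming
	 * the sclk tree with a post-divider of P1 - 1 yields
	 * VCO >> (P1 - 1) == 2 * core without enabling spll at all.
	 */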
761
762 hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
763 hwsq_op5f(hwsq, 0x00, 0x00); /* wait for access enabled? */
764 hwsq_fini(hwsq);
765
766 return info;
767error:
768 kfree(info);
769 return ERR_PTR(ret);
770}
771
772static int
773prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
774{
775 struct nouveau_device *device = nouveau_dev(dev);
776 struct nouveau_drm *drm = nouveau_drm(dev);
777 u32 hwsq_data, hwsq_kick;
778 int i;
779
780 if (nv_device(drm->device)->chipset < 0x94) {
781 hwsq_data = 0x001400;
782 hwsq_kick = 0x00000003;
783 } else {
784 hwsq_data = 0x080000;
785 hwsq_kick = 0x00000001;
786 }
787 /* upload hwsq ucode */
788 nv_mask(device, 0x001098, 0x00000008, 0x00000000);
789 nv_wr32(device, 0x001304, 0x00000000);
790 if (nv_device(drm->device)->chipset >= 0x92)
791 nv_wr32(device, 0x001318, 0x00000000);
792 for (i = 0; i < hwsq->len / 4; i++)
793 nv_wr32(device, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
794 nv_mask(device, 0x001098, 0x00000018, 0x00000018);
795
796 /* launch, and wait for completion */
797 nv_wr32(device, 0x00130c, hwsq_kick);
798 if (!nv_wait(device, 0x001308, 0x00000100, 0x00000000)) {
799 NV_ERROR(drm, "hwsq ucode exec timed out\n");
800 NV_ERROR(drm, "0x001308: 0x%08x\n", nv_rd32(device, 0x001308));
801 for (i = 0; i < hwsq->len / 4; i++) {
802 NV_ERROR(drm, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
803 nv_rd32(device, 0x001400 + (i * 4)));
804 }
805
806 return -EIO;
807 }
808
809 return 0;
810}
811
812int
813nv50_pm_clocks_set(struct drm_device *dev, void *data)
814{
815 struct nouveau_device *device = nouveau_dev(dev);
816 struct nv50_pm_state *info = data;
817 struct bit_entry M;
818 int ret = -EBUSY;
819
820 /* halt and idle execution engines */
821 nv_mask(device, 0x002504, 0x00000001, 0x00000001);
822 if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010))
823 goto resume;
824 if (!nv_wait(device, 0x00251c, 0x0000003f, 0x0000003f))
825 goto resume;
826
827 /* program memory clock, if necessary - must come before engine clock
828 * reprogramming due to how we construct the hwsq scripts in pre()
829 */
830#define nouveau_bios_init_exec(a,b) nouveau_bios_run_init_table((a), (b), NULL, 0)
831 if (info->mclk_hwsq.len) {
832 		/* execute some vbios scripts whose purpose is not understood */
833 if (!bit_table(dev, 'M', &M) && M.version == 1) {
834 if (M.length >= 6)
835 nouveau_bios_init_exec(dev, ROM16(M.data[5]));
836 if (M.length >= 8)
837 nouveau_bios_init_exec(dev, ROM16(M.data[7]));
838 if (M.length >= 10)
839 nouveau_bios_init_exec(dev, ROM16(M.data[9]));
840 nouveau_bios_init_exec(dev, info->mscript);
841 }
842
843 ret = prog_hwsq(dev, &info->mclk_hwsq);
844 if (ret)
845 goto resume;
846 }
847
848 /* program engine clocks */
849 ret = prog_hwsq(dev, &info->eclk_hwsq);
850
851resume:
852 nv_mask(device, 0x002504, 0x00000001, 0x00000000);
853 kfree(info);
854 return ret;
855}
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
deleted file mode 100644
index 0d0ed597fea8..000000000000
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ /dev/null
@@ -1,624 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drm.h"
27#include "nouveau_bios.h"
28#include "nouveau_pm.h"
29
30#include <subdev/bios/pll.h>
31#include <subdev/bios.h>
32#include <subdev/clock.h>
33#include <subdev/timer.h>
34#include <subdev/fb.h>
35
36static u32 read_clk(struct drm_device *, int, bool);
37static u32 read_pll(struct drm_device *, int, u32);
38
39static u32
40read_vco(struct drm_device *dev, int clk)
41{
42 struct nouveau_device *device = nouveau_dev(dev);
43 u32 sctl = nv_rd32(device, 0x4120 + (clk * 4));
44 if ((sctl & 0x00000030) != 0x00000030)
45 return read_pll(dev, 0x41, 0x00e820);
46 return read_pll(dev, 0x42, 0x00e8a0);
47}
48
49static u32
50read_clk(struct drm_device *dev, int clk, bool ignore_en)
51{
52 struct nouveau_device *device = nouveau_dev(dev);
53 struct nouveau_drm *drm = nouveau_drm(dev);
54 u32 sctl, sdiv, sclk;
55
56 /* refclk for the 0xe8xx plls is a fixed frequency */
57 if (clk >= 0x40) {
58 if (nv_device(drm->device)->chipset == 0xaf) {
 59 			/* yes, really: this register holds the clock directly (MHz) */
60 return nv_rd32(device, 0x00471c) * 1000;
61 }
62
63 return device->crystal;
64 }
65
66 sctl = nv_rd32(device, 0x4120 + (clk * 4));
67 if (!ignore_en && !(sctl & 0x00000100))
68 return 0;
69
70 switch (sctl & 0x00003000) {
71 case 0x00000000:
72 return device->crystal;
73 case 0x00002000:
74 if (sctl & 0x00000040)
75 return 108000;
76 return 100000;
77 case 0x00003000:
78 sclk = read_vco(dev, clk);
79 sdiv = ((sctl & 0x003f0000) >> 16) + 2;
80 return (sclk * 2) / sdiv;
81 default:
82 return 0;
83 }
84}
85
86static u32
87read_pll(struct drm_device *dev, int clk, u32 pll)
88{
89 struct nouveau_device *device = nouveau_dev(dev);
90 u32 ctrl = nv_rd32(device, pll + 0);
91 u32 sclk = 0, P = 1, N = 1, M = 1;
92
93 if (!(ctrl & 0x00000008)) {
94 if (ctrl & 0x00000001) {
95 u32 coef = nv_rd32(device, pll + 4);
96 M = (coef & 0x000000ff) >> 0;
97 N = (coef & 0x0000ff00) >> 8;
98 P = (coef & 0x003f0000) >> 16;
99
100 /* no post-divider on these.. */
101 if ((pll & 0x00ff00) == 0x00e800)
102 P = 1;
103
104 sclk = read_clk(dev, 0x00 + clk, false);
105 }
106 } else {
107 sclk = read_clk(dev, 0x10 + clk, false);
108 }
109
110 if (M * P)
111 return sclk * N / (M * P);
112 return 0;
113}
114
115struct creg {
116 u32 clk;
117 u32 pll;
118};
119
120static int
121calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
122{
123 struct nouveau_drm *drm = nouveau_drm(dev);
124 struct nouveau_device *device = nouveau_dev(dev);
125 struct nouveau_bios *bios = nouveau_bios(device);
126 struct nvbios_pll limits;
127 u32 oclk, sclk, sdiv;
128 int P, N, M, diff;
129 int ret;
130
131 reg->pll = 0;
132 reg->clk = 0;
133 if (!khz) {
134 NV_DEBUG(drm, "no clock for 0x%04x/0x%02x\n", pll, clk);
135 return 0;
136 }
137
138 switch (khz) {
139 case 27000:
140 reg->clk = 0x00000100;
141 return khz;
142 case 100000:
143 reg->clk = 0x00002100;
144 return khz;
145 case 108000:
146 reg->clk = 0x00002140;
147 return khz;
148 default:
149 sclk = read_vco(dev, clk);
150 sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
151 		/* if the clock has a PLL attached, and we can get to within
152 		 * [-2, 3) MHz of the target using a divider alone, we'll
153 		 * disable the PLL and use the divider instead.
154 		 *
155 		 * the divider can go as low as 2; it's limited here because NVIDIA
156 * and the VBIOS on my NVA8 seem to prefer using the PLL
157 * for 810MHz - is there a good reason?
158 */
159 if (sdiv > 4) {
160 oclk = (sclk * 2) / sdiv;
161 diff = khz - oclk;
162 if (!pll || (diff >= -2000 && diff < 3000)) {
163 reg->clk = (((sdiv - 2) << 16) | 0x00003100);
164 return oclk;
165 }
166 }
167
168 if (!pll) {
169 NV_ERROR(drm, "bad freq %02x: %d %d\n", clk, khz, sclk);
170 return -ERANGE;
171 }
172
173 break;
174 }
175
176 ret = nvbios_pll_parse(bios, pll, &limits);
177 if (ret)
178 return ret;
179
180 limits.refclk = read_clk(dev, clk - 0x10, true);
181 if (!limits.refclk)
182 return -EINVAL;
183
184 ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
185 if (ret >= 0) {
186 reg->clk = nv_rd32(device, 0x4120 + (clk * 4));
187 reg->pll = (P << 16) | (N << 8) | M;
188 }
189
190 return ret;
191}
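
To make the heuristic above concrete, a worked instance assuming an 810 MHz VCO and a 202 MHz target (values chosen for illustration; the driver additionally requires sdiv > 4 before taking the divider path, which holds here):

#include <stdio.h>

int main(void)
{
	unsigned sclk = 810000, khz = 202000;		/* kHz */
	unsigned sdiv = (sclk * 2) / (khz - 2999);	/* = 8 */
	if (sdiv > 65)
		sdiv = 65;
	unsigned oclk = (sclk * 2) / sdiv;		/* = 202500 kHz */
	int diff = (int)khz - (int)oclk;		/* = -500 */
	if (diff >= -2000 && diff < 3000)
		printf("divider: %u kHz, reg->clk = 0x%08x\n",
		       oclk, ((sdiv - 2) << 16) | 0x00003100);
	else
		printf("fall back to the PLL path\n");
	return 0;
}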
192
193static void
194prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
195{
196 struct nouveau_device *device = nouveau_dev(dev);
197 struct nouveau_drm *drm = nouveau_drm(dev);
198 const u32 src0 = 0x004120 + (clk * 4);
199 const u32 src1 = 0x004160 + (clk * 4);
200 const u32 ctrl = pll + 0;
201 const u32 coef = pll + 4;
202
203 if (!reg->clk && !reg->pll) {
204 NV_DEBUG(drm, "no clock for %02x\n", clk);
205 return;
206 }
207
208 if (reg->pll) {
209 nv_mask(device, src0, 0x00000101, 0x00000101);
210 nv_wr32(device, coef, reg->pll);
211 nv_mask(device, ctrl, 0x00000015, 0x00000015);
212 nv_mask(device, ctrl, 0x00000010, 0x00000000);
213 nv_wait(device, ctrl, 0x00020000, 0x00020000);
214 nv_mask(device, ctrl, 0x00000010, 0x00000010);
215 nv_mask(device, ctrl, 0x00000008, 0x00000000);
216 nv_mask(device, src1, 0x00000100, 0x00000000);
217 nv_mask(device, src1, 0x00000001, 0x00000000);
218 } else {
219 nv_mask(device, src1, 0x003f3141, 0x00000101 | reg->clk);
220 nv_mask(device, ctrl, 0x00000018, 0x00000018);
221 udelay(20);
222 nv_mask(device, ctrl, 0x00000001, 0x00000000);
223 nv_mask(device, src0, 0x00000100, 0x00000000);
224 nv_mask(device, src0, 0x00000001, 0x00000000);
225 }
226}
227
228static void
229prog_clk(struct drm_device *dev, int clk, struct creg *reg)
230{
231 struct nouveau_device *device = nouveau_dev(dev);
232 struct nouveau_drm *drm = nouveau_drm(dev);
233
234 if (!reg->clk) {
235 NV_DEBUG(drm, "no clock for %02x\n", clk);
236 return;
237 }
238
239 nv_mask(device, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
240}
241
242int
243nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
244{
245 perflvl->core = read_pll(dev, 0x00, 0x4200);
246 perflvl->shader = read_pll(dev, 0x01, 0x4220);
247 perflvl->memory = read_pll(dev, 0x02, 0x4000);
248 perflvl->unka0 = read_clk(dev, 0x20, false);
249 perflvl->vdec = read_clk(dev, 0x21, false);
250 perflvl->daemon = read_clk(dev, 0x25, false);
251 perflvl->copy = perflvl->core;
252 return 0;
253}
254
255struct nva3_pm_state {
256 struct nouveau_pm_level *perflvl;
257
258 struct creg nclk;
259 struct creg sclk;
260 struct creg vdec;
261 struct creg unka0;
262
263 struct creg mclk;
264 u8 *rammap;
265 u8 rammap_ver;
266 u8 rammap_len;
267 u8 *ramcfg;
268 u8 ramcfg_len;
269 u32 r004018;
270 u32 r100760;
271};
272
273void *
274nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
275{
276 struct nva3_pm_state *info;
277 u8 ramcfg_cnt;
278 int ret;
279
280 info = kzalloc(sizeof(*info), GFP_KERNEL);
281 if (!info)
282 return ERR_PTR(-ENOMEM);
283
284 ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk);
285 if (ret < 0)
286 goto out;
287
288 ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk);
289 if (ret < 0)
290 goto out;
291
292 ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk);
293 if (ret < 0)
294 goto out;
295
296 ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0);
297 if (ret < 0)
298 goto out;
299
300 ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec);
301 if (ret < 0)
302 goto out;
303
304 info->rammap = nouveau_perf_rammap(dev, perflvl->memory,
305 &info->rammap_ver,
306 &info->rammap_len,
307 &ramcfg_cnt, &info->ramcfg_len);
308 if (info->rammap_ver != 0x10 || info->rammap_len < 5)
309 info->rammap = NULL;
310
311 info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory,
312 &info->rammap_ver,
313 &info->ramcfg_len);
314 if (info->rammap_ver != 0x10)
315 info->ramcfg = NULL;
316
317 info->perflvl = perflvl;
318out:
319 if (ret < 0) {
320 kfree(info);
321 info = ERR_PTR(ret);
322 }
323 return info;
324}
325
326static bool
327nva3_pm_grcp_idle(void *data)
328{
329 struct drm_device *dev = data;
330 struct nouveau_device *device = nouveau_dev(dev);
331
332 if (!(nv_rd32(device, 0x400304) & 0x00000001))
333 return true;
334 if (nv_rd32(device, 0x400308) == 0x0050001c)
335 return true;
336 return false;
337}
338
339static void
340mclk_precharge(struct nouveau_mem_exec_func *exec)
341{
342 struct nouveau_device *device = nouveau_dev(exec->dev);
343 nv_wr32(device, 0x1002d4, 0x00000001);
344}
345
346static void
347mclk_refresh(struct nouveau_mem_exec_func *exec)
348{
349 struct nouveau_device *device = nouveau_dev(exec->dev);
350 nv_wr32(device, 0x1002d0, 0x00000001);
351}
352
353static void
354mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
355{
356 struct nouveau_device *device = nouveau_dev(exec->dev);
357 nv_wr32(device, 0x100210, enable ? 0x80000000 : 0x00000000);
358}
359
360static void
361mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
362{
363 struct nouveau_device *device = nouveau_dev(exec->dev);
364 nv_wr32(device, 0x1002dc, enable ? 0x00000001 : 0x00000000);
365}
366
367static void
368mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
369{
370 struct nouveau_device *device = nouveau_dev(exec->dev);
371 	volatile u32 post = nv_rd32(device, 0);
	(void)post;
372 udelay((nsec + 500) / 1000);
373}
374
375static u32
376mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
377{
378 struct nouveau_device *device = nouveau_dev(exec->dev);
379 if (mr <= 1)
380 return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
381 if (mr <= 3)
382 return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
383 return 0;
384}
385
386static void
387mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
388{
389 struct nouveau_device *device = nouveau_dev(exec->dev);
390 struct nouveau_fb *pfb = nouveau_fb(device);
391 if (mr <= 1) {
392 if (pfb->ram->ranks > 1)
393 nv_wr32(device, 0x1002c8 + ((mr - 0) * 4), data);
394 nv_wr32(device, 0x1002c0 + ((mr - 0) * 4), data);
395 } else
396 if (mr <= 3) {
397 if (pfb->ram->ranks > 1)
398 nv_wr32(device, 0x1002e8 + ((mr - 2) * 4), data);
399 nv_wr32(device, 0x1002e0 + ((mr - 2) * 4), data);
400 }
401}
402
403static void
404mclk_clock_set(struct nouveau_mem_exec_func *exec)
405{
406 struct nouveau_device *device = nouveau_dev(exec->dev);
407 struct nva3_pm_state *info = exec->priv;
408 u32 ctrl;
409
410 ctrl = nv_rd32(device, 0x004000);
411 if (!(ctrl & 0x00000008) && info->mclk.pll) {
412 nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
413 nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
414 nv_wr32(device, 0x004018, 0x00001000);
415 nv_wr32(device, 0x004000, (ctrl &= ~0x00000001));
416 nv_wr32(device, 0x004004, info->mclk.pll);
417 nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
418 udelay(64);
419 nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
420 udelay(20);
421 } else
422 if (!info->mclk.pll) {
423 nv_mask(device, 0x004168, 0x003f3040, info->mclk.clk);
424 nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
425 nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
426 nv_wr32(device, 0x004018, 0x0000d000 | info->r004018);
427 }
428
429 if (info->rammap) {
430 if (info->ramcfg && (info->rammap[4] & 0x08)) {
431 u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) |
432 info->ramcfg[5];
433 u32 unk5a4 = ROM16(info->ramcfg[7]);
434 u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 |
435 (info->ramcfg[3] & 0x0f) << 16 |
436 (info->ramcfg[9] & 0x0f) |
437 0x80000000;
438 nv_wr32(device, 0x1005a0, unk5a0);
439 nv_wr32(device, 0x1005a4, unk5a4);
440 nv_wr32(device, 0x10f804, unk804);
441 nv_mask(device, 0x10053c, 0x00001000, 0x00000000);
442 } else {
443 nv_mask(device, 0x10053c, 0x00001000, 0x00001000);
444 nv_mask(device, 0x10f804, 0x80000000, 0x00000000);
445 nv_mask(device, 0x100760, 0x22222222, info->r100760);
446 nv_mask(device, 0x1007a0, 0x22222222, info->r100760);
447 nv_mask(device, 0x1007e0, 0x22222222, info->r100760);
448 }
449 }
450
451 if (info->mclk.pll) {
452 nv_mask(device, 0x1110e0, 0x00088000, 0x00011000);
453 nv_wr32(device, 0x004000, (ctrl &= ~0x00000008));
454 }
455}
456
457static void
458mclk_timing_set(struct nouveau_mem_exec_func *exec)
459{
460 struct nouveau_device *device = nouveau_dev(exec->dev);
461 struct nva3_pm_state *info = exec->priv;
462 struct nouveau_pm_level *perflvl = info->perflvl;
463 int i;
464
465 for (i = 0; i < 9; i++)
466 nv_wr32(device, 0x100220 + (i * 4), perflvl->timing.reg[i]);
467
468 if (info->ramcfg) {
469 u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
470 nv_mask(device, 0x100200, 0x00001000, data);
471 }
472
473 if (info->ramcfg) {
474 u32 unk714 = nv_rd32(device, 0x100714) & ~0xf0000010;
475 u32 unk718 = nv_rd32(device, 0x100718) & ~0x00000100;
476 u32 unk71c = nv_rd32(device, 0x10071c) & ~0x00000100;
477 if ( (info->ramcfg[2] & 0x20))
478 unk714 |= 0xf0000000;
479 if (!(info->ramcfg[2] & 0x04))
480 unk714 |= 0x00000010;
481 nv_wr32(device, 0x100714, unk714);
482
483 if (info->ramcfg[2] & 0x01)
484 unk71c |= 0x00000100;
485 nv_wr32(device, 0x10071c, unk71c);
486
487 if (info->ramcfg[2] & 0x02)
488 unk718 |= 0x00000100;
489 nv_wr32(device, 0x100718, unk718);
490
491 if (info->ramcfg[2] & 0x10)
492 nv_wr32(device, 0x111100, 0x48000000); /*XXX*/
493 }
494}
495
496static void
497prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
498{
499 struct nouveau_device *device = nouveau_dev(dev);
500 struct nouveau_mem_exec_func exec = {
501 .dev = dev,
502 .precharge = mclk_precharge,
503 .refresh = mclk_refresh,
504 .refresh_auto = mclk_refresh_auto,
505 .refresh_self = mclk_refresh_self,
506 .wait = mclk_wait,
507 .mrg = mclk_mrg,
508 .mrs = mclk_mrs,
509 .clock_set = mclk_clock_set,
510 .timing_set = mclk_timing_set,
511 .priv = info
512 };
513 u32 ctrl;
514
515 	/* XXX: unclear where the 750MHz threshold comes from */
516 if (info->perflvl->memory <= 750000) {
517 info->r004018 = 0x10000000;
518 info->r100760 = 0x22222222;
519 }
520
521 ctrl = nv_rd32(device, 0x004000);
522 if (ctrl & 0x00000008) {
523 if (info->mclk.pll) {
524 nv_mask(device, 0x004128, 0x00000101, 0x00000101);
525 nv_wr32(device, 0x004004, info->mclk.pll);
526 nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
527 nv_wr32(device, 0x004000, (ctrl &= 0xffffffef));
528 nv_wait(device, 0x004000, 0x00020000, 0x00020000);
529 nv_wr32(device, 0x004000, (ctrl |= 0x00000010));
530 nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
531 nv_wr32(device, 0x004000, (ctrl |= 0x00000004));
532 }
533 } else {
534 u32 ssel = 0x00000101;
535 if (info->mclk.clk)
536 ssel |= info->mclk.clk;
537 else
538 ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
539 		nv_mask(device, 0x004168, 0x003f3141, ssel);
540 }
541
542 if (info->ramcfg) {
543 if (info->ramcfg[2] & 0x10) {
544 nv_mask(device, 0x111104, 0x00000600, 0x00000000);
545 } else {
546 nv_mask(device, 0x111100, 0x40000000, 0x40000000);
547 nv_mask(device, 0x111104, 0x00000180, 0x00000000);
548 }
549 }
550 if (info->rammap && !(info->rammap[4] & 0x02))
551 nv_mask(device, 0x100200, 0x00000800, 0x00000000);
552 nv_wr32(device, 0x611200, 0x00003300);
553 	if (info->ramcfg && !(info->ramcfg[2] & 0x10))
554 nv_wr32(device, 0x111100, 0x4c020000); /*XXX*/
555
556 nouveau_mem_exec(&exec, info->perflvl);
557
558 nv_wr32(device, 0x611200, 0x00003330);
559 if (info->rammap && (info->rammap[4] & 0x02))
560 nv_mask(device, 0x100200, 0x00000800, 0x00000800);
561 if (info->ramcfg) {
562 if (info->ramcfg[2] & 0x10) {
563 nv_mask(device, 0x111104, 0x00000180, 0x00000180);
564 nv_mask(device, 0x111100, 0x40000000, 0x00000000);
565 } else {
566 nv_mask(device, 0x111104, 0x00000600, 0x00000600);
567 }
568 }
569
570 if (info->mclk.pll) {
571 nv_mask(device, 0x004168, 0x00000001, 0x00000000);
572 nv_mask(device, 0x004168, 0x00000100, 0x00000000);
573 } else {
574 nv_mask(device, 0x004000, 0x00000001, 0x00000000);
575 nv_mask(device, 0x004128, 0x00000001, 0x00000000);
576 nv_mask(device, 0x004128, 0x00000100, 0x00000000);
577 }
578}
579
580int
581nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
582{
583 struct nouveau_device *device = nouveau_dev(dev);
584 struct nouveau_drm *drm = nouveau_drm(dev);
585 struct nva3_pm_state *info = pre_state;
586 int ret = -EAGAIN;
587
588 /* prevent any new grctx switches from starting */
589 nv_wr32(device, 0x400324, 0x00000000);
590 nv_wr32(device, 0x400328, 0x0050001c); /* wait flag 0x1c */
591 /* wait for any pending grctx switches to complete */
592 if (!nv_wait_cb(device, nva3_pm_grcp_idle, dev)) {
593 NV_ERROR(drm, "pm: ctxprog didn't go idle\n");
594 goto cleanup;
595 }
596 /* freeze PFIFO */
597 nv_mask(device, 0x002504, 0x00000001, 0x00000001);
598 if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010)) {
599 NV_ERROR(drm, "pm: fifo didn't go idle\n");
600 goto cleanup;
601 }
602
603 prog_pll(dev, 0x00, 0x004200, &info->nclk);
604 prog_pll(dev, 0x01, 0x004220, &info->sclk);
605 prog_clk(dev, 0x20, &info->unka0);
606 prog_clk(dev, 0x21, &info->vdec);
607
608 if (info->mclk.clk || info->mclk.pll)
609 prog_mem(dev, info);
610
611 ret = 0;
612
613cleanup:
614 /* unfreeze PFIFO */
615 nv_mask(device, 0x002504, 0x00000001, 0x00000000);
616 /* restore ctxprog to normal */
617 nv_wr32(device, 0x400324, 0x00000000);
618 nv_wr32(device, 0x400328, 0x0070009c); /* set flag 0x1c */
619 /* unblock it if necessary */
620 if (nv_rd32(device, 0x400308) == 0x0050001c)
621 nv_mask(device, 0x400824, 0x10000000, 0x10000000);
622 kfree(info);
623 return ret;
624}
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
deleted file mode 100644
index 3b7041cb013f..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ /dev/null
@@ -1,599 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nouveau_drm.h"
26#include "nouveau_bios.h"
27#include "nouveau_pm.h"
28
29#include <subdev/bios/pll.h>
30#include <subdev/bios.h>
31#include <subdev/clock.h>
32#include <subdev/timer.h>
33#include <subdev/fb.h>
34
35static u32 read_div(struct drm_device *, int, u32, u32);
36static u32 read_pll(struct drm_device *, u32);
37
38static u32
39read_vco(struct drm_device *dev, u32 dsrc)
40{
41 struct nouveau_device *device = nouveau_dev(dev);
42 u32 ssrc = nv_rd32(device, dsrc);
43 if (!(ssrc & 0x00000100))
44 return read_pll(dev, 0x00e800);
45 return read_pll(dev, 0x00e820);
46}
47
48static u32
49read_pll(struct drm_device *dev, u32 pll)
50{
51 struct nouveau_device *device = nouveau_dev(dev);
52 u32 ctrl = nv_rd32(device, pll + 0);
53 u32 coef = nv_rd32(device, pll + 4);
54 u32 P = (coef & 0x003f0000) >> 16;
55 u32 N = (coef & 0x0000ff00) >> 8;
56 u32 M = (coef & 0x000000ff) >> 0;
57 u32 sclk, doff;
58
59 if (!(ctrl & 0x00000001))
60 return 0;
61
62 switch (pll & 0xfff000) {
63 case 0x00e000:
64 sclk = 27000;
65 P = 1;
66 break;
67 case 0x137000:
68 doff = (pll - 0x137000) / 0x20;
69 sclk = read_div(dev, doff, 0x137120, 0x137140);
70 break;
71 case 0x132000:
72 switch (pll) {
73 case 0x132000:
74 sclk = read_pll(dev, 0x132020);
75 break;
76 case 0x132020:
77 sclk = read_div(dev, 0, 0x137320, 0x137330);
78 break;
79 default:
80 return 0;
81 }
82 break;
83 default:
84 return 0;
85 }
86
87 return sclk * N / M / P;
88}

static u32
read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
{
        struct nouveau_device *device = nouveau_dev(dev);
        u32 ssrc = nv_rd32(device, dsrc + (doff * 4));
        u32 sctl = nv_rd32(device, dctl + (doff * 4));

        switch (ssrc & 0x00000003) {
        case 0:
                if ((ssrc & 0x00030000) != 0x00030000)
                        return 27000;
                return 108000;
        case 2:
                return 100000;
        case 3:
                if (sctl & 0x80000000) {
                        u32 sclk = read_vco(dev, dsrc + (doff * 4));
                        u32 sdiv = (sctl & 0x0000003f) + 2;
                        return (sclk * 2) / sdiv;
                }

                return read_vco(dev, dsrc + (doff * 4));
        default:
                return 0;
        }
}

static u32
read_mem(struct drm_device *dev)
{
        struct nouveau_device *device = nouveau_dev(dev);
        u32 ssel = nv_rd32(device, 0x1373f0);
        if (ssel & 0x00000001)
                return read_div(dev, 0, 0x137300, 0x137310);
        return read_pll(dev, 0x132000);
}

static u32
read_clk(struct drm_device *dev, int clk)
{
        struct nouveau_device *device = nouveau_dev(dev);
        u32 sctl = nv_rd32(device, 0x137250 + (clk * 4));
        u32 ssel = nv_rd32(device, 0x137100);
        u32 sclk, sdiv;

        if (ssel & (1 << clk)) {
                if (clk < 7)
                        sclk = read_pll(dev, 0x137000 + (clk * 0x20));
                else
                        sclk = read_pll(dev, 0x1370e0);
                sdiv = ((sctl & 0x00003f00) >> 8) + 2;
        } else {
                sclk = read_div(dev, clk, 0x137160, 0x1371d0);
                sdiv = ((sctl & 0x0000003f) >> 0) + 2;
        }

        if (sctl & 0x80000000)
                return (sclk * 2) / sdiv;
        return sclk;
}

int
nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
        perflvl->shader = read_clk(dev, 0x00);
        perflvl->core   = perflvl->shader / 2;
        perflvl->memory = read_mem(dev);
        perflvl->rop    = read_clk(dev, 0x01);
        perflvl->hub07  = read_clk(dev, 0x02);
        perflvl->hub06  = read_clk(dev, 0x07);
        perflvl->hub01  = read_clk(dev, 0x08);
        perflvl->copy   = read_clk(dev, 0x09);
        perflvl->daemon = read_clk(dev, 0x0c);
        perflvl->vdec   = read_clk(dev, 0x0e);
        return 0;
}

struct nvc0_pm_clock {
        u32 freq;
        u32 ssel;
        u32 mdiv;
        u32 dsrc;
        u32 ddiv;
        u32 coef;
};

struct nvc0_pm_state {
        struct nouveau_pm_level *perflvl;
        struct nvc0_pm_clock eng[16];
        struct nvc0_pm_clock mem;
};

static u32
calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
{
        u32 div = min((ref * 2) / freq, (u32)65);
        if (div < 2)
                div = 2;

        *ddiv = div - 2;
        return (ref * 2) / div;
}
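
/* Worked example for calc_div(), with illustrative numbers only: the
 * divider operates in half steps (output = ref * 2 / div), and div is
 * clamped to the 2..65 range and programmed as div - 2.  For
 * ref = 1620000 kHz and freq = 405000 kHz:
 *   div = min((1620000 * 2) / 405000, 65) = 8
 *   out = (1620000 * 2) / 8 = 405000 kHz, *ddiv = 8 - 2 = 6
 * Targets below ref * 2 / 65 simply saturate at div = 65.
 */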

static u32
calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
{
        u32 sclk;

        /* use one of the fixed frequencies if possible */
        *ddiv = 0x00000000;
        switch (freq) {
        case  27000:
        case 108000:
                *dsrc = 0x00000000;
                if (freq == 108000)
                        *dsrc |= 0x00030000;
                return freq;
        case 100000:
                *dsrc = 0x00000002;
                return freq;
        default:
                *dsrc = 0x00000003;
                break;
        }

        /* otherwise, calculate the closest divider */
        sclk = read_vco(dev, clk);
        if (clk < 7)
                sclk = calc_div(dev, clk, sclk, freq, ddiv);
        return sclk;
}

static u32
calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
{
        struct nouveau_device *device = nouveau_dev(dev);
        struct nouveau_bios *bios = nouveau_bios(device);
        struct nvbios_pll limits;
        int N, M, P, ret;

        ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
        if (ret)
                return 0;

        limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
        if (!limits.refclk)
                return 0;

        ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
        if (ret <= 0)
                return 0;

        *coef = (P << 16) | (N << 8) | M;
        return ret;
}
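
/* The coefficient word built here uses the same layout read_pll() above
 * decodes.  Quick sanity check with sample values only: N = 28, M = 2,
 * P = 2 gives (2 << 16) | (28 << 8) | 2 = 0x00021c02.
 */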

/* A (likely rather simplified and incomplete) view of the clock tree
 *
 * Key:
 *
 * S: source select
 * D: divider
 * P: pll
 * F: switch
 *
 * Engine clocks:
 *
 * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
 *                      (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
 *
 * Not all registers exist for all clocks. For example: clocks >= 8 don't
 * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
 * they have the divider at 1371d0, though the source selection at 137160
 * still exists. You must use the divider at 137250 for these instead.
 *
 * Memory clock:
 *
 * TBD, read_mem() above is likely very wrong...
 *
 */
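
/* A hypothetical walk through the tree above, for one engine clock in
 * PLL mode (137100 switched to the F1 path):
 *   ref -> 137120(S)/137140(D) -> refclk of 1370X0(P) -> 137250(D) -> clock
 * and for the same clock in divider mode (F0):
 *   ref -> 137160(S)/1371d0(D) -> 137250(D) -> clock
 */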

static int
calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
{
        u32 src0, div0, div1D, div1P = 0;
        u32 clk0, clk1 = 0;

        /* invalid clock domain */
        if (!freq)
                return 0;

        /* first possible path, using only dividers */
        clk0 = calc_src(dev, clk, freq, &src0, &div0);
        clk0 = calc_div(dev, clk, clk0, freq, &div1D);

        /* see if we can get any closer using PLLs */
        if (clk0 != freq && (0x00004387 & (1 << clk))) {
                if (clk < 7)
                        clk1 = calc_pll(dev, clk, freq, &info->coef);
                else
                        clk1 = read_pll(dev, 0x1370e0);
                clk1 = calc_div(dev, clk, clk1, freq, &div1P);
        }

        /* select the method which gets closest to target freq */
        if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
                info->dsrc = src0;
                if (div0) {
                        info->ddiv |= 0x80000000;
                        info->ddiv |= div0 << 8;
                        info->ddiv |= div0;
                }
                if (div1D) {
                        info->mdiv |= 0x80000000;
                        info->mdiv |= div1D;
                }
                info->ssel = 0;
                info->freq = clk0;
        } else {
                if (div1P) {
                        info->mdiv |= 0x80000000;
                        info->mdiv |= div1P << 8;
                }
                info->ssel = (1 << clk);
                info->freq = clk1;
        }

        return 0;
}

static int
calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
{
        struct nouveau_device *device = nouveau_dev(dev);
        struct nouveau_bios *bios = nouveau_bios(device);
        struct nvbios_pll pll;
        int N, M, P, ret;
        u32 ctrl;

        /* mclk pll input freq comes from another pll, make sure it's on */
        ctrl = nv_rd32(device, 0x132020);
        if (!(ctrl & 0x00000001)) {
                /* if not, program it to 567MHz.  nfi where this value comes
                 * from - it looks like it's in the pll limits table for
                 * 132000 but the binary driver ignores all my attempts to
                 * change this value.
                 */
                nv_wr32(device, 0x137320, 0x00000103);
                nv_wr32(device, 0x137330, 0x81200606);
                nv_wait(device, 0x132020, 0x00010000, 0x00010000);
                nv_wr32(device, 0x132024, 0x0001150f);
                nv_mask(device, 0x132020, 0x00000001, 0x00000001);
                nv_wait(device, 0x137390, 0x00020000, 0x00020000);
                nv_mask(device, 0x132020, 0x00000004, 0x00000004);
        }

        /* for the moment, until the clock tree is better understood, use
         * pll mode for all clock frequencies
         */
        ret = nvbios_pll_parse(bios, 0x132000, &pll);
        if (ret == 0) {
                pll.refclk = read_pll(dev, 0x132020);
                if (pll.refclk) {
                        ret = nva3_calc_pll(dev, &pll, freq, &N, NULL, &M, &P);
                        if (ret > 0) {
                                info->coef = (P << 16) | (N << 8) | M;
                                return 0;
                        }
                }
        }

        return -EINVAL;
}

void *
nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
        struct nouveau_device *device = nouveau_dev(dev);
        struct nvc0_pm_state *info;
        int ret;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return ERR_PTR(-ENOMEM);

        /* NFI why this is still in the performance table, the ROPCs appear
         * to get their clock from clock 2 ("hub07", actually hub05 on this
         * chip, but, anyway...) as well.  nvatiming confirms hub05 and ROP
         * are always the same freq with the binary driver even when the
         * performance table says they should differ.
         */
        if (device->chipset == 0xd9)
                perflvl->rop = 0;

        if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
            (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
            (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
            (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
            (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
            (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
            (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
            (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
                kfree(info);
                return ERR_PTR(ret);
        }

        if (perflvl->memory) {
                ret = calc_mem(dev, &info->mem, perflvl->memory);
                if (ret) {
                        kfree(info);
                        return ERR_PTR(ret);
                }
        }

        info->perflvl = perflvl;
        return info;
}

static void
prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
{
        struct nouveau_device *device = nouveau_dev(dev);

        /* program dividers at 137160/1371d0 first */
        if (clk < 7 && !info->ssel) {
                nv_mask(device, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
                nv_wr32(device, 0x137160 + (clk * 0x04), info->dsrc);
        }

        /* switch clock to non-pll mode */
        nv_mask(device, 0x137100, (1 << clk), 0x00000000);
        nv_wait(device, 0x137100, (1 << clk), 0x00000000);

        /* reprogram pll */
        if (clk < 7) {
                /* make sure it's disabled first... */
                u32 base = 0x137000 + (clk * 0x20);
                u32 ctrl = nv_rd32(device, base + 0x00);
                if (ctrl & 0x00000001) {
                        nv_mask(device, base + 0x00, 0x00000004, 0x00000000);
                        nv_mask(device, base + 0x00, 0x00000001, 0x00000000);
                }
                /* program it to new values, if necessary */
                if (info->ssel) {
                        nv_wr32(device, base + 0x04, info->coef);
                        nv_mask(device, base + 0x00, 0x00000001, 0x00000001);
                        nv_wait(device, base + 0x00, 0x00020000, 0x00020000);
                        nv_mask(device, base + 0x00, 0x00020004, 0x00000004);
                }
        }

        /* select pll/non-pll mode, and program final clock divider */
        nv_mask(device, 0x137100, (1 << clk), info->ssel);
        nv_wait(device, 0x137100, (1 << clk), info->ssel);
        nv_mask(device, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
}
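
/* In other words, prog_clk() is a park-and-switch sequence:
 *   1. pre-load the divider path (137160/1371d0) if it will be used
 *   2. park the clock in non-pll mode and wait for the switch to land
 *   3. with the PLL now unused, disable it, write the new coefficient,
 *      re-enable it and wait for lock (bit 0x00020000)
 *   4. select the final mode and program the 137250 divider
 */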

static void
mclk_precharge(struct nouveau_mem_exec_func *exec)
{
}

static void
mclk_refresh(struct nouveau_mem_exec_func *exec)
{
}

static void
mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        nv_wr32(device, 0x10f210, enable ? 0x80000000 : 0x00000000);
}

static void
mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
{
}

static void
mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
{
        udelay((nsec + 500) / 1000);
}

static u32
mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        struct nouveau_fb *pfb = nouveau_fb(device);
        if (pfb->ram->type != NV_MEM_TYPE_GDDR5) {
                if (mr <= 1)
                        return nv_rd32(device, 0x10f300 + ((mr - 0) * 4));
                return nv_rd32(device, 0x10f320 + ((mr - 2) * 4));
        } else {
                if (mr == 0)
                        return nv_rd32(device, 0x10f300 + (mr * 4));
                else
                if (mr <= 7)
                        return nv_rd32(device, 0x10f32c + (mr * 4));
                return nv_rd32(device, 0x10f34c);
        }
}

static void
mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        struct nouveau_fb *pfb = nouveau_fb(device);
        if (pfb->ram->type != NV_MEM_TYPE_GDDR5) {
                if (mr <= 1) {
                        nv_wr32(device, 0x10f300 + ((mr - 0) * 4), data);
                        if (pfb->ram->ranks > 1)
                                nv_wr32(device, 0x10f308 + ((mr - 0) * 4), data);
                } else
                if (mr <= 3) {
                        nv_wr32(device, 0x10f320 + ((mr - 2) * 4), data);
                        if (pfb->ram->ranks > 1)
                                nv_wr32(device, 0x10f328 + ((mr - 2) * 4), data);
                }
        } else {
                if      (mr ==  0) nv_wr32(device, 0x10f300 + (mr * 4), data);
                else if (mr <=  7) nv_wr32(device, 0x10f32c + (mr * 4), data);
                else if (mr == 15) nv_wr32(device, 0x10f34c, data);
        }
}

static void
mclk_clock_set(struct nouveau_mem_exec_func *exec)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        struct nvc0_pm_state *info = exec->priv;
        u32 ctrl = nv_rd32(device, 0x132000);

        nv_wr32(device, 0x137360, 0x00000001);
        nv_wr32(device, 0x137370, 0x00000000);
        nv_wr32(device, 0x137380, 0x00000000);
        if (ctrl & 0x00000001)
                nv_wr32(device, 0x132000, (ctrl &= ~0x00000001));

        nv_wr32(device, 0x132004, info->mem.coef);
        nv_wr32(device, 0x132000, (ctrl |= 0x00000001));
        nv_wait(device, 0x137390, 0x00000002, 0x00000002);
        nv_wr32(device, 0x132018, 0x00005000);

        nv_wr32(device, 0x137370, 0x00000001);
        nv_wr32(device, 0x137380, 0x00000001);
        nv_wr32(device, 0x137360, 0x00000000);
}

static void
mclk_timing_set(struct nouveau_mem_exec_func *exec)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        struct nvc0_pm_state *info = exec->priv;
        struct nouveau_pm_level *perflvl = info->perflvl;
        int i;

        for (i = 0; i < 5; i++)
                nv_wr32(device, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
}

static void
prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
{
        struct nouveau_device *device = nouveau_dev(dev);
        struct nouveau_mem_exec_func exec = {
                .dev = dev,
                .precharge = mclk_precharge,
                .refresh = mclk_refresh,
                .refresh_auto = mclk_refresh_auto,
                .refresh_self = mclk_refresh_self,
                .wait = mclk_wait,
                .mrg = mclk_mrg,
                .mrs = mclk_mrs,
                .clock_set = mclk_clock_set,
                .timing_set = mclk_timing_set,
                .priv = info
        };

        if (device->chipset < 0xd0)
                nv_wr32(device, 0x611200, 0x00003300);
        else
                nv_wr32(device, 0x62c000, 0x03030000);

        nouveau_mem_exec(&exec, info->perflvl);

        if (device->chipset < 0xd0)
                nv_wr32(device, 0x611200, 0x00003330);
        else
                nv_wr32(device, 0x62c000, 0x03030300);
}

int
nvc0_pm_clocks_set(struct drm_device *dev, void *data)
{
        struct nvc0_pm_state *info = data;
        int i;

        if (info->mem.coef)
                prog_mem(dev, info);

        for (i = 0; i < 16; i++) {
                if (!info->eng[i].freq)
                        continue;
                prog_clk(dev, i, &info->eng[i]);
        }

        kfree(info);
        return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 20c41e73d448..6c220cd3497a 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -5,6 +5,7 @@ config DRM_OMAP
 	depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
 	depends on OMAP2_DSS
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 2603d909f49c..e7fa3cd96743 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -620,7 +620,6 @@ static struct drm_driver omap_drm_driver = {
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export = omap_gem_prime_export,
 	.gem_prime_import = omap_gem_prime_import,
-	.gem_init_object = omap_gem_init_object,
 	.gem_free_object = omap_gem_free_object,
 	.gem_vm_ops = &omap_gem_vm_ops,
 	.dumb_create = omap_gem_dumb_create,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 30b95b736658..07847693cf49 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -220,7 +220,6 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 		union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
 void omap_gem_free_object(struct drm_gem_object *obj);
-int omap_gem_init_object(struct drm_gem_object *obj);
 void *omap_gem_vaddr(struct drm_gem_object *obj);
 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 		uint32_t handle, uint64_t *offset);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 533f6ebec531..5aec3e81fe24 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1274,11 +1274,6 @@ unlock:
 	return ret;
 }
 
-int omap_gem_init_object(struct drm_gem_object *obj)
-{
-	return -EINVAL; /* unused */
-}
-
 /* don't call directly.. called from GEM core when it is time to actually
  * free the object..
  */
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 9263db117ff8..cb858600185f 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -261,7 +261,7 @@ int omap_drm_irq_install(struct drm_device *dev)
 		mutex_unlock(&dev->struct_mutex);
 		return -EBUSY;
 	}
-	dev->irq_enabled = 1;
+	dev->irq_enabled = true;
 	mutex_unlock(&dev->struct_mutex);
 
 	/* Before installing handler */
@@ -272,7 +272,7 @@ int omap_drm_irq_install(struct drm_device *dev)
 
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
-		dev->irq_enabled = 0;
+		dev->irq_enabled = false;
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
@@ -283,7 +283,7 @@ int omap_drm_irq_install(struct drm_device *dev)
 
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
-		dev->irq_enabled = 0;
+		dev->irq_enabled = false;
 		mutex_unlock(&dev->struct_mutex);
 		dispc_free_irq(dev);
 	}
@@ -294,11 +294,12 @@ int omap_drm_irq_uninstall(struct drm_device *dev)
 int omap_drm_irq_uninstall(struct drm_device *dev)
 {
 	unsigned long irqflags;
-	int irq_enabled, i;
+	bool irq_enabled;
+	int i;
 
 	mutex_lock(&dev->struct_mutex);
 	irq_enabled = dev->irq_enabled;
-	dev->irq_enabled = 0;
+	dev->irq_enabled = false;
 	mutex_unlock(&dev->struct_mutex);
 
 	/*
@@ -307,9 +308,9 @@ int omap_drm_irq_uninstall(struct drm_device *dev)
 	if (dev->num_crtcs) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		for (i = 0; i < dev->num_crtcs; i++) {
-			DRM_WAKEUP(&dev->vbl_queue[i]);
-			dev->vblank_enabled[i] = 0;
-			dev->last_vblank[i] =
+			DRM_WAKEUP(&dev->vblank[i].queue);
+			dev->vblank[i].enabled = false;
+			dev->vblank[i].last =
 				dev->driver->get_vblank_counter(dev, i);
 		}
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index d6c12796023c..037d324bf58f 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -6,6 +6,7 @@ config DRM_QXL
 	select FB_SYS_IMAGEBLIT
 	select FB_DEFERRED_IO
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	  QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 835caba026d3..5e827c29d194 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -107,10 +107,17 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
 		qxl_io_log(qdev, "failed crc check for client_monitors_config,"
 				 " retrying\n");
 	}
-	drm_helper_hpd_irq_event(qdev->ddev);
+
+	if (!drm_helper_hpd_irq_event(qdev->ddev)) {
+		/* notify that the monitor configuration changed, to
+		   adjust at the arbitrary resolution */
+		drm_kms_helper_hotplug_event(qdev->ddev);
+	}
 }
 
-static int qxl_add_monitors_config_modes(struct drm_connector *connector)
+static int qxl_add_monitors_config_modes(struct drm_connector *connector,
+					 unsigned *pwidth,
+					 unsigned *pheight)
 {
 	struct drm_device *dev = connector->dev;
 	struct qxl_device *qdev = dev->dev_private;
@@ -126,11 +133,15 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector)
 	mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
 			    false);
 	mode->type |= DRM_MODE_TYPE_PREFERRED;
+	*pwidth = head->width;
+	*pheight = head->height;
 	drm_mode_probed_add(connector, mode);
 	return 1;
 }
 
-static int qxl_add_common_modes(struct drm_connector *connector)
+static int qxl_add_common_modes(struct drm_connector *connector,
+				unsigned pwidth,
+				unsigned pheight)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode = NULL;
@@ -159,12 +170,9 @@ static int qxl_add_common_modes(struct drm_connector *connector)
 	};
 
 	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
-		if (common_modes[i].w < 320 || common_modes[i].h < 200)
-			continue;
-
 		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
 				    60, false, false, false);
-		if (common_modes[i].w == 1024 && common_modes[i].h == 768)
+		if (common_modes[i].w == pwidth && common_modes[i].h == pheight)
 			mode->type |= DRM_MODE_TYPE_PREFERRED;
 		drm_mode_probed_add(connector, mode);
 	}
@@ -720,16 +728,18 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
 {
 	int ret = 0;
 	struct qxl_device *qdev = connector->dev->dev_private;
+	unsigned pwidth = 1024;
+	unsigned pheight = 768;
 
 	DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config);
 	/* TODO: what should we do here? only show the configured modes for the
 	 * device, or allow the full list, or both? */
 	if (qdev->monitors_config && qdev->monitors_config->count) {
-		ret = qxl_add_monitors_config_modes(connector);
+		ret = qxl_add_monitors_config_modes(connector, &pwidth, &pheight);
 		if (ret < 0)
 			return ret;
 	}
-	ret += qxl_add_common_modes(connector);
+	ret += qxl_add_common_modes(connector, pwidth, pheight);
 	return ret;
 }
 
@@ -793,7 +803,10 @@ static enum drm_connector_status qxl_conn_detect(
 		qdev->client_monitors_config->count > output->index &&
 		qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG("#%d connected: %d\n", output->index, connected);
+	if (!connected)
+		qxl_monitors_config_set(qdev, output->index, 0, 0, 0, 0, 0);
+
 	return connected ? connector_status_connected
 			 : connector_status_disconnected;
 }
@@ -835,8 +848,21 @@ static const struct drm_encoder_funcs qxl_enc_funcs = {
 	.destroy = qxl_enc_destroy,
 };
 
+static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
+{
+	if (qdev->hotplug_mode_update_property)
+		return 0;
+
+	qdev->hotplug_mode_update_property =
+		drm_property_create_range(qdev->ddev, DRM_MODE_PROP_IMMUTABLE,
+					  "hotplug_mode_update", 0, 1);
+
+	return 0;
+}
+
 static int qdev_output_init(struct drm_device *dev, int num_output)
 {
+	struct qxl_device *qdev = dev->dev_private;
 	struct qxl_output *qxl_output;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
@@ -863,6 +889,8 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
 	drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
 	drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
 
+	drm_object_attach_property(&connector->base,
+				   qdev->hotplug_mode_update_property, 0);
 	drm_sysfs_connector_add(connector);
 	return 0;
 }
@@ -975,6 +1003,9 @@ int qxl_modeset_init(struct qxl_device *qdev)
 	qdev->ddev->mode_config.max_height = 8192;
 
 	qdev->ddev->mode_config.fb_base = qdev->vram_base;
+
+	qxl_mode_create_hotplug_mode_update_property(qdev);
+
 	for (i = 0 ; i < qxl_num_crtc; ++i) {
 		qdev_crtc_init(qdev->ddev, i);
 		qdev_output_init(qdev->ddev, i);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 514118ae72d4..fee8748bdca5 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -225,7 +225,6 @@ static struct drm_driver qxl_driver = {
 	.debugfs_init = qxl_debugfs_init,
 	.debugfs_cleanup = qxl_debugfs_takedown,
 #endif
-	.gem_init_object = qxl_gem_object_init,
 	.gem_free_object = qxl_gem_object_free,
 	.gem_open_object = qxl_gem_object_open,
 	.gem_close_object = qxl_gem_object_close,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index f7c9adde46a0..7bda32f68d3b 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -323,6 +323,8 @@ struct qxl_device {
 	struct work_struct gc_work;
 
 	struct work_struct fb_work;
+
+	struct drm_property *hotplug_mode_update_property;
 };
 
 /* forward declaration for QXL_INFO_IO */
@@ -412,7 +414,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
 				      struct qxl_surface *surf,
 				      struct qxl_bo **qobj,
 				      uint32_t *handle);
-int qxl_gem_object_init(struct drm_gem_object *obj);
 void qxl_gem_object_free(struct drm_gem_object *gobj);
 int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
 void qxl_gem_object_close(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 88722f233430..f437b30ce689 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -108,7 +108,7 @@ static void qxl_fb_dirty_flush(struct fb_info *info)
 	u32 x1, x2, y1, y2;
 
 	/* TODO: hard coding 32 bpp */
-	int stride = qfbdev->qfb.base.pitches[0] * 4;
+	int stride = qfbdev->qfb.base.pitches[0];
 
 	x1 = qfbdev->dirty.x1;
 	x2 = qfbdev->dirty.x2;
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 1648e4125af7..b96f0c9d89b2 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -28,12 +28,6 @@
 #include "qxl_drv.h"
 #include "qxl_object.h"
 
-int qxl_gem_object_init(struct drm_gem_object *obj)
-{
-	/* we do nothings here */
-	return 0;
-}
-
 void qxl_gem_object_free(struct drm_gem_object *gobj)
 {
 	struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 9e8da9ee9731..e5ca498be920 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -120,7 +120,7 @@ int qxl_device_init(struct qxl_device *qdev,
 		    struct pci_dev *pdev,
 		    unsigned long flags)
 {
-	int r;
+	int r, sb;
 
 	qdev->dev = &pdev->dev;
 	qdev->ddev = ddev;
@@ -136,21 +136,39 @@ int qxl_device_init(struct qxl_device *qdev,
 	qdev->rom_base = pci_resource_start(pdev, 2);
 	qdev->rom_size = pci_resource_len(pdev, 2);
 	qdev->vram_base = pci_resource_start(pdev, 0);
-	qdev->surfaceram_base = pci_resource_start(pdev, 1);
-	qdev->surfaceram_size = pci_resource_len(pdev, 1);
 	qdev->io_base = pci_resource_start(pdev, 3);
 
 	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
-	qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
-	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk)\n",
+
+	if (pci_resource_len(pdev, 4) > 0) {
+		/* 64bit surface bar present */
+		sb = 4;
+		qdev->surfaceram_base = pci_resource_start(pdev, sb);
+		qdev->surfaceram_size = pci_resource_len(pdev, sb);
+		qdev->surface_mapping =
+			io_mapping_create_wc(qdev->surfaceram_base,
+					     qdev->surfaceram_size);
+	}
+	if (qdev->surface_mapping == NULL) {
+		/* 64bit surface bar not present (or mapping failed) */
+		sb = 1;
+		qdev->surfaceram_base = pci_resource_start(pdev, sb);
+		qdev->surfaceram_size = pci_resource_len(pdev, sb);
+		qdev->surface_mapping =
+			io_mapping_create_wc(qdev->surfaceram_base,
+					     qdev->surfaceram_size);
+	}
+
+	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
 		 (unsigned long long)qdev->vram_base,
 		 (unsigned long long)pci_resource_end(pdev, 0),
 		 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
 		 (int)pci_resource_len(pdev, 0) / 1024,
 		 (unsigned long long)qdev->surfaceram_base,
-		 (unsigned long long)pci_resource_end(pdev, 1),
+		 (unsigned long long)pci_resource_end(pdev, sb),
 		 (int)qdev->surfaceram_size / 1024 / 1024,
-		 (int)qdev->surfaceram_size / 1024);
+		 (int)qdev->surfaceram_size / 1024,
+		 (sb == 4) ? "64bit" : "32bit");
 
 	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
 	if (!qdev->rom) {
@@ -230,9 +248,13 @@ int qxl_device_init(struct qxl_device *qdev,
 	qdev->surfaces_mem_slot = setup_slot(qdev, 1,
 		(unsigned long)qdev->surfaceram_base,
 		(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
-	DRM_INFO("main mem slot %d [%lx,%x)\n",
+	DRM_INFO("main mem slot %d [%lx,%x]\n",
 		 qdev->main_mem_slot,
 		 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
+	DRM_INFO("surface mem slot %d [%lx,%lx]\n",
+		 qdev->surfaces_mem_slot,
+		 (unsigned long)qdev->surfaceram_base,
+		 (unsigned long)qdev->surfaceram_size);
 
 
 	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
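The BAR-selection logic added above follows a common probe-and-fall-back pattern. The sketch below is a condensed, stand-alone restatement of it with stub callbacks instead of the real pci_dev, not driver code; it assumes *mapping is NULL on entry, just as qdev->surface_mapping is after kzalloc:

static int pick_surface_bar(unsigned long (*res_len)(int bar),
                            void *(*map_bar)(int bar), void **mapping)
{
        int sb = 1;

        if (res_len(4) > 0) {
                sb = 4;                 /* 64-bit surface BAR present */
                *mapping = map_bar(sb);
        }
        if (*mapping == NULL) {
                sb = 1;                 /* absent, or mapping failed */
                *mapping = map_bar(sb);
        }
        return sb;
}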
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 037786d7c1dc..c7e7e6590c2b 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -516,6 +516,8 @@ int qxl_ttm_init(struct qxl_device *qdev)
 		 (unsigned)qdev->vram_size / (1024 * 1024));
 	DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
 		 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+	DRM_INFO("qxl: %uM of Surface memory size\n",
+		 (unsigned)qdev->surfaceram_size / (1024 * 1024));
 	if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
 		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
 	r = qxl_ttm_debugfs_init(qdev);
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index af10f8571d87..92be50c39ffd 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1711,7 +1711,9 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V6
 #define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK           0x0c
 #define PIXEL_CLOCK_V6_MISC_HDMI_24BPP              0x00
 #define PIXEL_CLOCK_V6_MISC_HDMI_36BPP              0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6           0x08    //for V6, the correct defintion for 36bpp should be 2 for 36bpp(2:1)
 #define PIXEL_CLOCK_V6_MISC_HDMI_30BPP              0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6           0x04    //for V6, the correct defintion for 30bpp should be 1 for 36bpp(5:4)
 #define PIXEL_CLOCK_V6_MISC_HDMI_48BPP              0x0c
 #define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC             0x10
 #define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK            0x40
@@ -2223,7 +2225,7 @@ typedef struct _SET_VOLTAGE_PARAMETERS_V2
   USHORT   usVoltageLevel;              // real voltage level
 }SET_VOLTAGE_PARAMETERS_V2;
 
-
+// used by both SetVoltageTable v1.3 and v1.4
 typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
 {
   UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
@@ -2290,15 +2292,36 @@ typedef struct _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
 #define ATOM_GET_VOLTAGE_VID                0x00
 #define ATOM_GET_VOTLAGE_INIT_SEQ           0x03
 #define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID   0x04
-// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state
-#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
+#define ATOM_GET_VOLTAGE_SVID2              0x07        //Get SVI2 Regulator Info
 
+// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state
+#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
 // for SI, this state map to 0xff01 voltage state in Power Play table, which is performance state
 #define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
-// undefined power state
+
 #define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
 #define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
 
+// New Added from CI Hawaii for GetVoltageInfoTable, input parameter structure
+typedef struct  _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2
+{
+  UCHAR    ucVoltageType;               // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Input: Indicate action: Get voltage info
+  USHORT   usVoltageLevel;              // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
+  ULONG    ulSCLKFreq;                  // Input: when ucVoltageMode= ATOM_GET_VOLTAGE_EVV_VOLTAGE, DPM state SCLK frequency, Define in PPTable SCLK/Voltage dependence table
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2;
+
+// New in GetVoltageInfo v1.2 ucVoltageMode
+#define ATOM_GET_VOLTAGE_EVV_VOLTAGE        0x09
+
+// New Added from CI Hawaii for EVV feature
+typedef struct  _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2
+{
+  USHORT   usVoltageLevel;              // real voltage level in unit of mv
+  USHORT   usVoltageId;                 // Voltage Id programmed in Voltage Regulator
+  ULONG    ulReseved;
+}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2;
+
 /****************************************************************************/
 // Structures used by TVEncoderControlTable
 /****************************************************************************/
@@ -3864,6 +3887,8 @@ typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
 #define PP_AC_DC_SWITCH_GPIO_PINID          60
 //from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enable
 #define VDDC_VRHOT_GPIO_PINID               61
+//if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, Peak Current Control feature is enabled
+#define VDDC_PCC_GPIO_PINID                 62
 
 typedef struct _ATOM_GPIO_PIN_LUT
 {
@@ -4169,10 +4194,10 @@ typedef struct _ATOM_COMMON_RECORD_HEADER
 #define ATOM_OBJECT_LINK_RECORD_TYPE                   18 //Once this record is present under one object, it indicats the oobject is linked to another obj described by the record
 #define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE          19
 #define ATOM_ENCODER_CAP_RECORD_TYPE                   20
-
+#define ATOM_BRACKET_LAYOUT_RECORD_TYPE                21
 
 //Must be updated when new record type is added,equal to that record definition!
-#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_ENCODER_CAP_RECORD_TYPE
+#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_BRACKET_LAYOUT_RECORD_TYPE
 
 typedef struct _ATOM_I2C_RECORD
 {
@@ -4397,6 +4422,31 @@ typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
   USHORT              usReserved;
 }ATOM_CONNECTOR_REMOTE_CAP_RECORD;
 
+typedef struct  _ATOM_CONNECTOR_LAYOUT_INFO
+{
+  USHORT usConnectorObjectId;
+  UCHAR  ucConnectorType;
+  UCHAR  ucPosition;
+}ATOM_CONNECTOR_LAYOUT_INFO;
+
+// define ATOM_CONNECTOR_LAYOUT_INFO.ucConnectorType to describe the display connector size
+#define CONNECTOR_TYPE_DVI_D                 1
+#define CONNECTOR_TYPE_DVI_I                 2
+#define CONNECTOR_TYPE_VGA                   3
+#define CONNECTOR_TYPE_HDMI                  4
+#define CONNECTOR_TYPE_DISPLAY_PORT          5
+#define CONNECTOR_TYPE_MINI_DISPLAY_PORT     6
+
+typedef struct  _ATOM_BRACKET_LAYOUT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucLength;
+  UCHAR                       ucWidth;
+  UCHAR                       ucConnNum;
+  UCHAR                       ucReserved;
+  ATOM_CONNECTOR_LAYOUT_INFO  asConnInfo[1];
+}ATOM_BRACKET_LAYOUT_RECORD;
+
 /****************************************************************************/
 // ASIC voltage data table
 /****************************************************************************/
@@ -4524,8 +4574,9 @@ typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
 #define VOLTAGE_OBJ_VR_I2C_INIT_SEQ          3        //VOLTAGE REGULATOR INIT sequece through I2C -> ATOM_I2C_VOLTAGE_OBJECT_V3
 #define VOLTAGE_OBJ_PHASE_LUT                4        //Set Vregulator Phase lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
 #define VOLTAGE_OBJ_SVID2                    7        //Indicate voltage control by SVID2 ->ATOM_SVID2_VOLTAGE_OBJECT_V3
-#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT     0x10     //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
-#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT   0x11     //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_EVV                      8
+#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT     0x10     //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT   0x11     //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
 #define VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT  0x12     //High1 voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
 
@@ -4552,6 +4603,10 @@ typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
   VOLTAGE_LUT_ENTRY asVolI2cLut[1];        // end with 0xff
 }ATOM_I2C_VOLTAGE_OBJECT_V3;
 
+// ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
+#define VOLTAGE_DATA_ONE_BYTE                0
+#define VOLTAGE_DATA_TWO_BYTE                1
+
 typedef struct  _ATOM_GPIO_VOLTAGE_OBJECT_V3
 {
   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;   // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
@@ -4584,7 +4639,8 @@ typedef struct _ATOM_SVID2_VOLTAGE_OBJECT_V3
 // 1:0 – offset trim,
   USHORT   usLoadLine_PSI;
 // GPU GPIO pin Id to SVID2 regulator VRHot pin. possible value 0~31. 0 means GPIO0, 31 means GPIO31
-  UCHAR    ucReserved[2];
+  UCHAR    ucSVDGpioId;     //0~31 indicate GPIO0~31
+  UCHAR    ucSVCGpioId;     //0~31 indicate GPIO0~31
   ULONG    ulReserved;
 }ATOM_SVID2_VOLTAGE_OBJECT_V3;
 
@@ -4637,6 +4693,49 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V2_1
   USHORT  usElbVDDCI_LevelArrayOffset;     // offset of 2 dimension voltage level USHORT array
 }ATOM_ASIC_PROFILING_INFO_V2_1;
 
+typedef struct  _ATOM_ASIC_PROFILING_INFO_V3_1
+{
+  ATOM_COMMON_TABLE_HEADER  asHeader;
+  ULONG  ulEvvDerateTdp;
+  ULONG  ulEvvDerateTdc;
+  ULONG  ulBoardCoreTemp;
+  ULONG  ulMaxVddc;
+  ULONG  ulMinVddc;
+  ULONG  ulLoadLineSlop;
+  ULONG  ulLeakageTemp;
+  ULONG  ulLeakageVoltage;
+  ULONG  ulCACmEncodeRange;
+  ULONG  ulCACmEncodeAverage;
+  ULONG  ulCACbEncodeRange;
+  ULONG  ulCACbEncodeAverage;
+  ULONG  ulKt_bEncodeRange;
+  ULONG  ulKt_bEncodeAverage;
+  ULONG  ulKv_mEncodeRange;
+  ULONG  ulKv_mEncodeAverage;
+  ULONG  ulKv_bEncodeRange;
+  ULONG  ulKv_bEncodeAverage;
+  ULONG  ulLkgEncodeLn_MaxDivMin;
+  ULONG  ulLkgEncodeMin;
+  ULONG  ulEfuseLogisticAlpha;
+  USHORT usPowerDpm0;
+  USHORT usCurrentDpm0;
+  USHORT usPowerDpm1;
+  USHORT usCurrentDpm1;
+  USHORT usPowerDpm2;
+  USHORT usCurrentDpm2;
+  USHORT usPowerDpm3;
+  USHORT usCurrentDpm3;
+  USHORT usPowerDpm4;
+  USHORT usCurrentDpm4;
+  USHORT usPowerDpm5;
+  USHORT usCurrentDpm5;
+  USHORT usPowerDpm6;
+  USHORT usCurrentDpm6;
+  USHORT usPowerDpm7;
+  USHORT usCurrentDpm7;
+}ATOM_ASIC_PROFILING_INFO_V3_1;
+
+
 typedef struct _ATOM_POWER_SOURCE_OBJECT
 {
   UCHAR  ucPwrSrcId;                       // Power source
@@ -5808,6 +5907,8 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
 #define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0      0x0C
 #define ATOM_S7_DOS_MODE_PIXEL_FORMATb0     0xF0
 #define ATOM_S7_DOS_8BIT_DAC_ENb1           0x01
+#define ATOM_S7_ASIC_INIT_COMPLETEb1        0x02
+#define ATOM_S7_ASIC_INIT_COMPLETE_MASK     0x00000200
 #define ATOM_S7_DOS_MODE_NUMBERw1           0x0FFFF
 
 #define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT       8
@@ -6242,6 +6343,7 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
 #define _128Mx32            0x53
 #define _256Mx8             0x61
 #define _256Mx16            0x62
+#define _512Mx8             0x71
 
 #define SAMSUNG             0x1
 #define INFINEON            0x2
@@ -6987,9 +7089,10 @@ typedef struct _ATOM_DISP_OUT_INFO_V3
   UCHAR  ucMaxDispEngineNum;
   UCHAR  ucMaxActiveDispEngineNum;
   UCHAR  ucMaxPPLLNum;
-  UCHAR  ucCoreRefClkSource;               // value of CORE_REF_CLK_SOURCE
-  UCHAR  ucReserved[3];
-  ASIC_TRANSMITTER_INFO_V2  asTransmitterInfo[1];   // for alligment only
+  UCHAR  ucCoreRefClkSource;               // value of CORE_REF_CLK_SOURCE
+  UCHAR  ucDispCaps;
+  UCHAR  ucReserved[2];
+  ASIC_TRANSMITTER_INFO_V2  asTransmitterInfo[1];   // for alligment only
 }ATOM_DISP_OUT_INFO_V3;
 
 //ucDispCaps
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index bf87f6d435f8..80a20120e625 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1753,7 +1753,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 			if (pll != ATOM_PPLL_INVALID)
 				return pll;
 		}
-	} else {
+	} else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
 		/* use the same PPLL for all monitors with the same clock */
 		pll = radeon_get_shared_nondp_ppll(crtc);
 		if (pll != ATOM_PPLL_INVALID)
@@ -1910,6 +1910,21 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
 	int i;
 
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	if (crtc->fb) {
+		int r;
+		struct radeon_framebuffer *radeon_fb;
+		struct radeon_bo *rbo;
+
+		radeon_fb = to_radeon_framebuffer(crtc->fb);
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r))
+			DRM_ERROR("failed to reserve rbo before unpin\n");
+		else {
+			radeon_bo_unpin(rbo);
+			radeon_bo_unreserve(rbo);
+		}
+	}
 	/* disable the GRPH */
 	if (ASIC_IS_DCE4(rdev))
 		WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
@@ -1940,7 +1955,9 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
 		break;
 	case ATOM_PPLL0:
 		/* disable the ppll */
-		if ((rdev->family == CHIP_ARUBA) || (rdev->family == CHIP_BONAIRE))
+		if ((rdev->family == CHIP_ARUBA) ||
+		    (rdev->family == CHIP_BONAIRE) ||
+		    (rdev->family == CHIP_HAWAII))
 			atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
 						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
 		break;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 00885417ffff..fb3ae07a1469 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -690,8 +690,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
 
 	/* set the lane count on the sink */
 	tmp = dp_info->dp_lane_count;
-	if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+	if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
 		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
 
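For context: drm_dp_enhanced_frame_cap() was added to include/drm/drm_dp_helper.h in the same series and wraps essentially the open-coded DPCD test removed here, along the lines of:

static inline bool
drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	return dpcd[DP_DPCD_REV] >= 0x11 &&
	       (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
}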
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 5e891b226acf..a42d61571f49 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -213,7 +213,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
 	props.type = BACKLIGHT_RAW;
 	snprintf(bl_name, sizeof(bl_name),
 		 "radeon_bl%d", dev->primary->index);
-	bd = backlight_device_register(bl_name, &drm_connector->kdev,
+	bd = backlight_device_register(bl_name, drm_connector->kdev,
 				       pdata, &radeon_atom_backlight_ops, &props);
 	if (IS_ERR(bd)) {
 		DRM_ERROR("Backlight registration failed\n");
@@ -1662,19 +1662,11 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
 			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
 			/* enable the transmitter */
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
 		} else {
 			/* setup and enable the encoder and transmitter */
 			atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-			/* some dce3.x boards have a bug in their transmitter control table.
-			 * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
-			 * does the same thing and more.
-			 */
-			if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
-			    (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
-				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
 		}
 		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
 			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -1692,16 +1684,11 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
-			/* disable the transmitter */
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-		} else if (ASIC_IS_DCE4(rdev)) {
+		if (ASIC_IS_DCE4(rdev)) {
 			/* disable the transmitter */
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
 		} else {
 			/* disable the encoder and transmitter */
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
 			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
 		}
@@ -2410,6 +2397,15 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 
 	/* this is needed for the pll/ss setup to work correctly in some cases */
 	atombios_set_encoder_crtc_source(encoder);
+	/* set up the FMT blocks */
+	if (ASIC_IS_DCE8(rdev))
+		dce8_program_fmt(encoder);
+	else if (ASIC_IS_DCE4(rdev))
+		dce4_program_fmt(encoder);
+	else if (ASIC_IS_DCE3(rdev))
+		dce3_program_fmt(encoder);
+	else if (ASIC_IS_AVIVO(rdev))
+		avivo_program_fmt(encoder);
 }
 
 static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 51e947a97edf..1ed479976358 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -40,6 +40,20 @@
40#define VOLTAGE_VID_OFFSET_SCALE1 625 40#define VOLTAGE_VID_OFFSET_SCALE1 625
41#define VOLTAGE_VID_OFFSET_SCALE2 100 41#define VOLTAGE_VID_OFFSET_SCALE2 100
42 42
43static const struct ci_pt_defaults defaults_hawaii_xt =
44{
45 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
46 { 0x84, 0x0, 0x0, 0x7F, 0x0, 0x0, 0x5A, 0x60, 0x51, 0x8E, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
47 { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
48};
49
50static const struct ci_pt_defaults defaults_hawaii_pro =
51{
52 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
53 { 0x93, 0x0, 0x0, 0x97, 0x0, 0x0, 0x6B, 0x60, 0x51, 0x95, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
54 { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
55};
56
43static const struct ci_pt_defaults defaults_bonaire_xt = 57static const struct ci_pt_defaults defaults_bonaire_xt =
44{ 58{
45 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, 59 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
@@ -187,22 +201,38 @@ static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
187 struct ci_power_info *pi = ci_get_pi(rdev); 201 struct ci_power_info *pi = ci_get_pi(rdev);
188 202
189 switch (rdev->pdev->device) { 203 switch (rdev->pdev->device) {
190 case 0x6650: 204 case 0x6650:
191 case 0x6658: 205 case 0x6658:
192 case 0x665C: 206 case 0x665C:
193 default: 207 default:
194 pi->powertune_defaults = &defaults_bonaire_xt; 208 pi->powertune_defaults = &defaults_bonaire_xt;
195 break; 209 break;
196 case 0x6651: 210 case 0x6651:
197 case 0x665D: 211 case 0x665D:
198 pi->powertune_defaults = &defaults_bonaire_pro; 212 pi->powertune_defaults = &defaults_bonaire_pro;
199 break; 213 break;
200 case 0x6640: 214 case 0x6640:
201 pi->powertune_defaults = &defaults_saturn_xt; 215 pi->powertune_defaults = &defaults_saturn_xt;
202 break; 216 break;
203 case 0x6641: 217 case 0x6641:
204 pi->powertune_defaults = &defaults_saturn_pro; 218 pi->powertune_defaults = &defaults_saturn_pro;
205 break; 219 break;
220 case 0x67B8:
221 case 0x67B0:
222 case 0x67A0:
223 case 0x67A1:
224 case 0x67A2:
225 case 0x67A8:
226 case 0x67A9:
227 case 0x67AA:
228 case 0x67B9:
229 case 0x67BE:
230 pi->powertune_defaults = &defaults_hawaii_xt;
231 break;
232 case 0x67BA:
233 case 0x67B1:
234 pi->powertune_defaults = &defaults_hawaii_pro;
235 break;
206 } 236 }
207 237
208 pi->dte_tj_offset = 0; 238 pi->dte_tj_offset = 0;
@@ -5142,9 +5172,15 @@ int ci_dpm_init(struct radeon_device *rdev)
5142 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0; 5172 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5143 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; 5173 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5144 5174
5145 pi->thermal_temp_setting.temperature_low = 99500; 5175 if (rdev->family == CHIP_HAWAII) {
5146 pi->thermal_temp_setting.temperature_high = 100000; 5176 pi->thermal_temp_setting.temperature_low = 94500;
5147 pi->thermal_temp_setting.temperature_shutdown = 104000; 5177 pi->thermal_temp_setting.temperature_high = 95000;
5178 pi->thermal_temp_setting.temperature_shutdown = 104000;
5179 } else {
5180 pi->thermal_temp_setting.temperature_low = 99500;
5181 pi->thermal_temp_setting.temperature_high = 100000;
5182 pi->thermal_temp_setting.temperature_shutdown = 104000;
5183 }
5148 5184
5149 pi->uvd_enabled = false; 5185 pi->uvd_enabled = false;
5150 5186
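Two things worth noting in the ci_dpm.c hunks above: the powertune defaults are keyed off the PCI device ID (the 0x67Ax/0x67Bx IDs are Hawaii variants), and Hawaii's thermal throttle window is set 5 C lower (94.5-95.0 C against 99.5-100.0 C elsewhere) while the 104 C shutdown limit is unchanged. A standalone sketch of the ID lookup, with the table names reduced to strings:

    #include <stdint.h>
    #include <stdio.h>

    /* Device IDs taken from the hunk above; the default (as in the driver)
     * is the Bonaire XT table. */
    static const char *powertune_table_for(uint16_t device)
    {
            switch (device) {
            case 0x6651: case 0x665D:
                    return "defaults_bonaire_pro";
            case 0x6640:
                    return "defaults_saturn_xt";
            case 0x6641:
                    return "defaults_saturn_pro";
            case 0x67B8: case 0x67B0: case 0x67A0: case 0x67A1: case 0x67A2:
            case 0x67A8: case 0x67A9: case 0x67AA: case 0x67B9: case 0x67BE:
                    return "defaults_hawaii_xt";
            case 0x67BA: case 0x67B1:
                    return "defaults_hawaii_pro";
            default:
                    return "defaults_bonaire_xt";
            }
    }

    int main(void)
    {
            printf("0x67B0 -> %s\n", powertune_table_for(0x67B0));
            return 0;
    }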
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 252e10a41cf5..9c745dd22438 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -217,6 +217,10 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
217 ucode_start_address = BONAIRE_SMC_UCODE_START; 217 ucode_start_address = BONAIRE_SMC_UCODE_START;
218 ucode_size = BONAIRE_SMC_UCODE_SIZE; 218 ucode_size = BONAIRE_SMC_UCODE_SIZE;
219 break; 219 break;
220 case CHIP_HAWAII:
221 ucode_start_address = HAWAII_SMC_UCODE_START;
222 ucode_size = HAWAII_SMC_UCODE_SIZE;
223 break;
220 default: 224 default:
221 DRM_ERROR("unknown asic in smc ucode loader\n"); 225 DRM_ERROR("unknown asic in smc ucode loader\n");
222 BUG(); 226 BUG();
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 9cd2bc989ac7..ae92aa041c6a 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -41,6 +41,14 @@ MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
41MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin"); 41MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
42MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin"); 42MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
43MODULE_FIRMWARE("radeon/BONAIRE_smc.bin"); 43MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
44MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
45MODULE_FIRMWARE("radeon/HAWAII_me.bin");
46MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
47MODULE_FIRMWARE("radeon/HAWAII_mec.bin");
48MODULE_FIRMWARE("radeon/HAWAII_mc.bin");
49MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
50MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
51MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
44MODULE_FIRMWARE("radeon/KAVERI_pfp.bin"); 52MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
45MODULE_FIRMWARE("radeon/KAVERI_me.bin"); 53MODULE_FIRMWARE("radeon/KAVERI_me.bin");
46MODULE_FIRMWARE("radeon/KAVERI_ce.bin"); 54MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
@@ -67,11 +75,6 @@ extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
67extern int cik_sdma_resume(struct radeon_device *rdev); 75extern int cik_sdma_resume(struct radeon_device *rdev);
68extern void cik_sdma_enable(struct radeon_device *rdev, bool enable); 76extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
69extern void cik_sdma_fini(struct radeon_device *rdev); 77extern void cik_sdma_fini(struct radeon_device *rdev);
70extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
71 struct radeon_ib *ib,
72 uint64_t pe,
73 uint64_t addr, unsigned count,
74 uint32_t incr, uint32_t flags);
75static void cik_rlc_stop(struct radeon_device *rdev); 78static void cik_rlc_stop(struct radeon_device *rdev);
76static void cik_pcie_gen3_enable(struct radeon_device *rdev); 79static void cik_pcie_gen3_enable(struct radeon_device *rdev);
77static void cik_program_aspm(struct radeon_device *rdev); 80static void cik_program_aspm(struct radeon_device *rdev);
@@ -1302,6 +1305,171 @@ static const u32 kalindi_mgcg_cgcg_init[] =
1302 0xd80c, 0xff000ff0, 0x00000100 1305 0xd80c, 0xff000ff0, 0x00000100
1303}; 1306};
1304 1307
1308static const u32 hawaii_golden_spm_registers[] =
1309{
1310 0x30800, 0xe0ffffff, 0xe0000000
1311};
1312
1313static const u32 hawaii_golden_common_registers[] =
1314{
1315 0x30800, 0xffffffff, 0xe0000000,
1316 0x28350, 0xffffffff, 0x3a00161a,
1317 0x28354, 0xffffffff, 0x0000002e,
1318 0x9a10, 0xffffffff, 0x00018208,
1319 0x98f8, 0xffffffff, 0x12011003
1320};
1321
1322static const u32 hawaii_golden_registers[] =
1323{
1324 0x3354, 0x00000333, 0x00000333,
1325 0x9a10, 0x00010000, 0x00058208,
1326 0x9830, 0xffffffff, 0x00000000,
1327 0x9834, 0xf00fffff, 0x00000400,
1328 0x9838, 0x0002021c, 0x00020200,
1329 0xc78, 0x00000080, 0x00000000,
1330 0x5bb0, 0x000000f0, 0x00000070,
1331 0x5bc0, 0xf0311fff, 0x80300000,
1332 0x350c, 0x00810000, 0x408af000,
1333 0x7030, 0x31000111, 0x00000011,
1334 0x2f48, 0x73773777, 0x12010001,
1335 0x2120, 0x0000007f, 0x0000001b,
1336 0x21dc, 0x00007fb6, 0x00002191,
1337 0x3628, 0x0000003f, 0x0000000a,
1338 0x362c, 0x0000003f, 0x0000000a,
1339 0x2ae4, 0x00073ffe, 0x000022a2,
1340 0x240c, 0x000007ff, 0x00000000,
1341 0x8bf0, 0x00002001, 0x00000001,
1342 0x8b24, 0xffffffff, 0x00ffffff,
1343 0x30a04, 0x0000ff0f, 0x00000000,
1344 0x28a4c, 0x07ffffff, 0x06000000,
1345 0x3e78, 0x00000001, 0x00000002,
1346 0xc768, 0x00000008, 0x00000008,
1347 0xc770, 0x00000f00, 0x00000800,
1348 0xc774, 0x00000f00, 0x00000800,
1349 0xc798, 0x00ffffff, 0x00ff7fbf,
1350 0xc79c, 0x00ffffff, 0x00ff7faf,
1351 0x8c00, 0x000000ff, 0x00000800,
1352 0xe40, 0x00001fff, 0x00001fff,
1353 0x9060, 0x0000007f, 0x00000020,
1354 0x9508, 0x00010000, 0x00010000,
1355 0xae00, 0x00100000, 0x000ff07c,
1356 0xac14, 0x000003ff, 0x0000000f,
1357 0xac10, 0xffffffff, 0x7564fdec,
1358 0xac0c, 0xffffffff, 0x3120b9a8,
1359 0xac08, 0x20000000, 0x0f9c0000
1360};
1361
1362static const u32 hawaii_mgcg_cgcg_init[] =
1363{
1364 0xc420, 0xffffffff, 0xfffffffd,
1365 0x30800, 0xffffffff, 0xe0000000,
1366 0x3c2a0, 0xffffffff, 0x00000100,
1367 0x3c208, 0xffffffff, 0x00000100,
1368 0x3c2c0, 0xffffffff, 0x00000100,
1369 0x3c2c8, 0xffffffff, 0x00000100,
1370 0x3c2c4, 0xffffffff, 0x00000100,
1371 0x55e4, 0xffffffff, 0x00200100,
1372 0x3c280, 0xffffffff, 0x00000100,
1373 0x3c214, 0xffffffff, 0x06000100,
1374 0x3c220, 0xffffffff, 0x00000100,
1375 0x3c218, 0xffffffff, 0x06000100,
1376 0x3c204, 0xffffffff, 0x00000100,
1377 0x3c2e0, 0xffffffff, 0x00000100,
1378 0x3c224, 0xffffffff, 0x00000100,
1379 0x3c200, 0xffffffff, 0x00000100,
1380 0x3c230, 0xffffffff, 0x00000100,
1381 0x3c234, 0xffffffff, 0x00000100,
1382 0x3c250, 0xffffffff, 0x00000100,
1383 0x3c254, 0xffffffff, 0x00000100,
1384 0x3c258, 0xffffffff, 0x00000100,
1385 0x3c25c, 0xffffffff, 0x00000100,
1386 0x3c260, 0xffffffff, 0x00000100,
1387 0x3c27c, 0xffffffff, 0x00000100,
1388 0x3c278, 0xffffffff, 0x00000100,
1389 0x3c210, 0xffffffff, 0x06000100,
1390 0x3c290, 0xffffffff, 0x00000100,
1391 0x3c274, 0xffffffff, 0x00000100,
1392 0x3c2b4, 0xffffffff, 0x00000100,
1393 0x3c2b0, 0xffffffff, 0x00000100,
1394 0x3c270, 0xffffffff, 0x00000100,
1395 0x30800, 0xffffffff, 0xe0000000,
1396 0x3c020, 0xffffffff, 0x00010000,
1397 0x3c024, 0xffffffff, 0x00030002,
1398 0x3c028, 0xffffffff, 0x00040007,
1399 0x3c02c, 0xffffffff, 0x00060005,
1400 0x3c030, 0xffffffff, 0x00090008,
1401 0x3c034, 0xffffffff, 0x00010000,
1402 0x3c038, 0xffffffff, 0x00030002,
1403 0x3c03c, 0xffffffff, 0x00040007,
1404 0x3c040, 0xffffffff, 0x00060005,
1405 0x3c044, 0xffffffff, 0x00090008,
1406 0x3c048, 0xffffffff, 0x00010000,
1407 0x3c04c, 0xffffffff, 0x00030002,
1408 0x3c050, 0xffffffff, 0x00040007,
1409 0x3c054, 0xffffffff, 0x00060005,
1410 0x3c058, 0xffffffff, 0x00090008,
1411 0x3c05c, 0xffffffff, 0x00010000,
1412 0x3c060, 0xffffffff, 0x00030002,
1413 0x3c064, 0xffffffff, 0x00040007,
1414 0x3c068, 0xffffffff, 0x00060005,
1415 0x3c06c, 0xffffffff, 0x00090008,
1416 0x3c070, 0xffffffff, 0x00010000,
1417 0x3c074, 0xffffffff, 0x00030002,
1418 0x3c078, 0xffffffff, 0x00040007,
1419 0x3c07c, 0xffffffff, 0x00060005,
1420 0x3c080, 0xffffffff, 0x00090008,
1421 0x3c084, 0xffffffff, 0x00010000,
1422 0x3c088, 0xffffffff, 0x00030002,
1423 0x3c08c, 0xffffffff, 0x00040007,
1424 0x3c090, 0xffffffff, 0x00060005,
1425 0x3c094, 0xffffffff, 0x00090008,
1426 0x3c098, 0xffffffff, 0x00010000,
1427 0x3c09c, 0xffffffff, 0x00030002,
1428 0x3c0a0, 0xffffffff, 0x00040007,
1429 0x3c0a4, 0xffffffff, 0x00060005,
1430 0x3c0a8, 0xffffffff, 0x00090008,
1431 0x3c0ac, 0xffffffff, 0x00010000,
1432 0x3c0b0, 0xffffffff, 0x00030002,
1433 0x3c0b4, 0xffffffff, 0x00040007,
1434 0x3c0b8, 0xffffffff, 0x00060005,
1435 0x3c0bc, 0xffffffff, 0x00090008,
1436 0x3c0c0, 0xffffffff, 0x00010000,
1437 0x3c0c4, 0xffffffff, 0x00030002,
1438 0x3c0c8, 0xffffffff, 0x00040007,
1439 0x3c0cc, 0xffffffff, 0x00060005,
1440 0x3c0d0, 0xffffffff, 0x00090008,
1441 0x3c0d4, 0xffffffff, 0x00010000,
1442 0x3c0d8, 0xffffffff, 0x00030002,
1443 0x3c0dc, 0xffffffff, 0x00040007,
1444 0x3c0e0, 0xffffffff, 0x00060005,
1445 0x3c0e4, 0xffffffff, 0x00090008,
1446 0x3c0e8, 0xffffffff, 0x00010000,
1447 0x3c0ec, 0xffffffff, 0x00030002,
1448 0x3c0f0, 0xffffffff, 0x00040007,
1449 0x3c0f4, 0xffffffff, 0x00060005,
1450 0x3c0f8, 0xffffffff, 0x00090008,
1451 0xc318, 0xffffffff, 0x00020200,
1452 0x3350, 0xffffffff, 0x00000200,
1453 0x15c0, 0xffffffff, 0x00000400,
1454 0x55e8, 0xffffffff, 0x00000000,
1455 0x2f50, 0xffffffff, 0x00000902,
1456 0x3c000, 0xffffffff, 0x96940200,
1457 0x8708, 0xffffffff, 0x00900100,
1458 0xc424, 0xffffffff, 0x0020003f,
1459 0x38, 0xffffffff, 0x0140001c,
1460 0x3c, 0x000f0000, 0x000f0000,
1461 0x220, 0xffffffff, 0xc060000c,
1462 0x224, 0xc0000fff, 0x00000100,
1463 0xf90, 0xffffffff, 0x00000100,
1464 0xf98, 0x00000101, 0x00000000,
1465 0x20a8, 0xffffffff, 0x00000104,
1466 0x55e4, 0xff000fff, 0x00000100,
1467 0x30cc, 0xc0000fff, 0x00000104,
1468 0xc1e4, 0x00000001, 0x00000001,
1469 0xd00c, 0xff000ff0, 0x00000100,
1470 0xd80c, 0xff000ff0, 0x00000100
1471};
1472
1305static void cik_init_golden_registers(struct radeon_device *rdev) 1473static void cik_init_golden_registers(struct radeon_device *rdev)
1306{ 1474{
1307 switch (rdev->family) { 1475 switch (rdev->family) {
@@ -1347,6 +1515,20 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
1347 spectre_golden_spm_registers, 1515 spectre_golden_spm_registers,
1348 (const u32)ARRAY_SIZE(spectre_golden_spm_registers)); 1516 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
1349 break; 1517 break;
1518 case CHIP_HAWAII:
1519 radeon_program_register_sequence(rdev,
1520 hawaii_mgcg_cgcg_init,
1521 (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
1522 radeon_program_register_sequence(rdev,
1523 hawaii_golden_registers,
1524 (const u32)ARRAY_SIZE(hawaii_golden_registers));
1525 radeon_program_register_sequence(rdev,
1526 hawaii_golden_common_registers,
1527 (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
1528 radeon_program_register_sequence(rdev,
1529 hawaii_golden_spm_registers,
1530 (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
1531 break;
1350 default: 1532 default:
1351 break; 1533 break;
1352 } 1534 }
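The hawaii_* tables above are flat arrays of {offset, and_mask, or_mask} triples consumed by radeon_program_register_sequence(). A standalone sketch of that consumer; the full-mask shortcut (and_mask == 0xffffffff writes the value directly instead of doing a read-modify-write) is an assumption here and should be checked against radeon_device.c:

    #include <stdint.h>

    typedef uint32_t u32;

    static u32 mmio[0x40000];                  /* stand-in register file */
    #define RREG32(reg)     (mmio[(reg) >> 2])
    #define WREG32(reg, v)  (mmio[(reg) >> 2] = (v))

    static void program_register_sequence(const u32 *registers, u32 array_size)
    {
            u32 tmp, reg, and_mask, or_mask;
            u32 i;

            for (i = 0; i + 2 < array_size; i += 3) {   /* one triple per step */
                    reg = registers[i + 0];
                    and_mask = registers[i + 1];
                    or_mask = registers[i + 2];

                    if (and_mask == 0xffffffff) {
                            tmp = or_mask;              /* full-width update */
                    } else {
                            tmp = RREG32(reg);          /* read-modify-write */
                            tmp &= ~and_mask;
                            tmp |= or_mask;
                    }
                    WREG32(reg, tmp);
            }
    }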
@@ -1454,6 +1636,35 @@ static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
1454 {0x0000009f, 0x00b48000} 1636 {0x0000009f, 0x00b48000}
1455}; 1637};
1456 1638
1639#define HAWAII_IO_MC_REGS_SIZE 22
1640
1641static const u32 hawaii_io_mc_regs[HAWAII_IO_MC_REGS_SIZE][2] =
1642{
1643 {0x0000007d, 0x40000000},
1644 {0x0000007e, 0x40180304},
1645 {0x0000007f, 0x0000ff00},
1646 {0x00000081, 0x00000000},
1647 {0x00000083, 0x00000800},
1648 {0x00000086, 0x00000000},
1649 {0x00000087, 0x00000100},
1650 {0x00000088, 0x00020100},
1651 {0x00000089, 0x00000000},
1652 {0x0000008b, 0x00040000},
1653 {0x0000008c, 0x00000100},
1654 {0x0000008e, 0xff010000},
1655 {0x00000090, 0xffffefff},
1656 {0x00000091, 0xfff3efff},
1657 {0x00000092, 0xfff3efbf},
1658 {0x00000093, 0xf7ffffff},
1659 {0x00000094, 0xffffff7f},
1660 {0x00000095, 0x00000fff},
1661 {0x00000096, 0x00116fff},
1662 {0x00000097, 0x60010000},
1663 {0x00000098, 0x10010000},
1664 {0x0000009f, 0x00c79000}
1665};
1666
1667
1457/** 1668/**
1458 * cik_srbm_select - select specific register instances 1669 * cik_srbm_select - select specific register instances
1459 * 1670 *
@@ -1498,11 +1709,17 @@ static int ci_mc_load_microcode(struct radeon_device *rdev)
1498 1709
1499 switch (rdev->family) { 1710 switch (rdev->family) {
1500 case CHIP_BONAIRE: 1711 case CHIP_BONAIRE:
1501 default:
1502 io_mc_regs = (u32 *)&bonaire_io_mc_regs; 1712 io_mc_regs = (u32 *)&bonaire_io_mc_regs;
1503 ucode_size = CIK_MC_UCODE_SIZE; 1713 ucode_size = CIK_MC_UCODE_SIZE;
1504 regs_size = BONAIRE_IO_MC_REGS_SIZE; 1714 regs_size = BONAIRE_IO_MC_REGS_SIZE;
1505 break; 1715 break;
1716 case CHIP_HAWAII:
1717 io_mc_regs = (u32 *)&hawaii_io_mc_regs;
1718 ucode_size = HAWAII_MC_UCODE_SIZE;
1719 regs_size = HAWAII_IO_MC_REGS_SIZE;
1720 break;
1721 default:
1722 return -EINVAL;
1506 } 1723 }
1507 1724
1508 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; 1725 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
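Before the MC ucode itself is streamed, the {address, data} pairs in hawaii_io_mc_regs are typically written through an index/data register pair. A standalone sketch; the MC_SEQ_IO_DEBUG_* names and offsets are assumptions for illustration, not taken from this patch:

    #include <stdint.h>
    #include <stdio.h>

    #define MC_SEQ_IO_DEBUG_INDEX 0x2a44   /* assumed offsets */
    #define MC_SEQ_IO_DEBUG_DATA  0x2a48

    static void wreg32(uint32_t reg, uint32_t v)
    {
            printf("WREG32(0x%04x, 0x%08x)\n", reg, v);   /* stands in for MMIO */
    }

    static void load_io_mc_regs(const uint32_t (*io_mc_regs)[2], int regs_size)
    {
            int i;

            for (i = 0; i < regs_size; i++) {
                    wreg32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[i][0]); /* address */
                    wreg32(MC_SEQ_IO_DEBUG_DATA,  io_mc_regs[i][1]); /* data */
            }
    }

    int main(void)
    {
            const uint32_t regs[2][2] = { {0x7d, 0x40000000}, {0x9f, 0x00c79000} };

            load_io_mc_regs(regs, 2);
            return 0;
    }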
@@ -1564,8 +1781,8 @@ static int cik_init_microcode(struct radeon_device *rdev)
1564{ 1781{
1565 const char *chip_name; 1782 const char *chip_name;
1566 size_t pfp_req_size, me_req_size, ce_req_size, 1783 size_t pfp_req_size, me_req_size, ce_req_size,
1567 mec_req_size, rlc_req_size, mc_req_size, 1784 mec_req_size, rlc_req_size, mc_req_size = 0,
1568 sdma_req_size, smc_req_size; 1785 sdma_req_size, smc_req_size = 0;
1569 char fw_name[30]; 1786 char fw_name[30];
1570 int err; 1787 int err;
1571 1788
@@ -1583,6 +1800,17 @@ static int cik_init_microcode(struct radeon_device *rdev)
1583 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1800 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1584 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4); 1801 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
1585 break; 1802 break;
1803 case CHIP_HAWAII:
1804 chip_name = "HAWAII";
1805 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1806 me_req_size = CIK_ME_UCODE_SIZE * 4;
1807 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1808 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1809 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
1810 mc_req_size = HAWAII_MC_UCODE_SIZE * 4;
1811 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1812 smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
1813 break;
1586 case CHIP_KAVERI: 1814 case CHIP_KAVERI:
1587 chip_name = "KAVERI"; 1815 chip_name = "KAVERI";
1588 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1816 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
@@ -1763,9 +1991,227 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
1763 1991
1764 num_pipe_configs = rdev->config.cik.max_tile_pipes; 1992 num_pipe_configs = rdev->config.cik.max_tile_pipes;
1765 if (num_pipe_configs > 8) 1993 if (num_pipe_configs > 8)
1766 num_pipe_configs = 8; /* ??? */ 1994 num_pipe_configs = 16;
1767 1995
1768 if (num_pipe_configs == 8) { 1996 if (num_pipe_configs == 16) {
1997 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1998 switch (reg_offset) {
1999 case 0:
2000 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2001 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2002 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2003 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2004 break;
2005 case 1:
2006 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2007 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2008 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2009 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2010 break;
2011 case 2:
2012 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2013 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2014 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2015 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2016 break;
2017 case 3:
2018 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2019 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2020 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2021 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2022 break;
2023 case 4:
2024 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2025 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2026 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2027 TILE_SPLIT(split_equal_to_row_size));
2028 break;
2029 case 5:
2030 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2031 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2032 break;
2033 case 6:
2034 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2035 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2036 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2037 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2038 break;
2039 case 7:
2040 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2041 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2042 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2043 TILE_SPLIT(split_equal_to_row_size));
2044 break;
2045 case 8:
2046 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2047 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
2048 break;
2049 case 9:
2050 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2051 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2052 break;
2053 case 10:
2054 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2055 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2056 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2057 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2058 break;
2059 case 11:
2060 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2061 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2062 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
2063 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2064 break;
2065 case 12:
2066 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2067 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2068 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2069 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2070 break;
2071 case 13:
2072 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2073 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2074 break;
2075 case 14:
2076 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2077 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2078 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2079 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2080 break;
2081 case 16:
2082 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2083 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2084 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
2085 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2086 break;
2087 case 17:
2088 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2089 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2090 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2091 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2092 break;
2093 case 27:
2094 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2095 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2096 break;
2097 case 28:
2098 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2099 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2100 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2101 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2102 break;
2103 case 29:
2104 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2105 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2106 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
2107 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2108 break;
2109 case 30:
2110 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2111 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2112 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2113 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2114 break;
2115 default:
2116 gb_tile_moden = 0;
2117 break;
2118 }
2119 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
2120 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2121 }
2122 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
2123 switch (reg_offset) {
2124 case 0:
2125 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2126 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2127 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2128 NUM_BANKS(ADDR_SURF_16_BANK));
2129 break;
2130 case 1:
2131 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2132 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2133 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2134 NUM_BANKS(ADDR_SURF_16_BANK));
2135 break;
2136 case 2:
2137 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2138 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2139 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2140 NUM_BANKS(ADDR_SURF_16_BANK));
2141 break;
2142 case 3:
2143 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2144 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2145 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2146 NUM_BANKS(ADDR_SURF_16_BANK));
2147 break;
2148 case 4:
2149 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2150 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2151 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2152 NUM_BANKS(ADDR_SURF_8_BANK));
2153 break;
2154 case 5:
2155 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2156 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2157 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2158 NUM_BANKS(ADDR_SURF_4_BANK));
2159 break;
2160 case 6:
2161 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2162 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2163 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2164 NUM_BANKS(ADDR_SURF_2_BANK));
2165 break;
2166 case 8:
2167 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2168 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2169 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2170 NUM_BANKS(ADDR_SURF_16_BANK));
2171 break;
2172 case 9:
2173 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2174 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2175 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2176 NUM_BANKS(ADDR_SURF_16_BANK));
2177 break;
2178 case 10:
2179 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2180 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2181 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2182 NUM_BANKS(ADDR_SURF_16_BANK));
2183 break;
2184 case 11:
2185 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2186 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2187 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2188 NUM_BANKS(ADDR_SURF_8_BANK));
2189 break;
2190 case 12:
2191 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2192 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2193 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2194 NUM_BANKS(ADDR_SURF_4_BANK));
2195 break;
2196 case 13:
2197 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2198 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2199 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2200 NUM_BANKS(ADDR_SURF_2_BANK));
2201 break;
2202 case 14:
2203 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2204 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2205 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2206 NUM_BANKS(ADDR_SURF_2_BANK));
2207 break;
2208 default:
2209 gb_tile_moden = 0;
2210 break;
2211 }
2212 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2213 }
2214 } else if (num_pipe_configs == 8) {
1769 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 2215 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1770 switch (reg_offset) { 2216 switch (reg_offset) {
1771 case 0: 2217 case 0:
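The tiling hunk above also changes the pipe-config fallback: values above 8 used to be clamped to 8 (flagged "???"), and are now promoted to 16 so that Hawaii's 16 tile pipes select the new ADDR_SURF_P16 tables. A standalone sketch of the new selection:

    #include <stdio.h>

    /* Before: anything above 8 pipes was clamped to 8.
     * After: it is promoted to 16, matching Hawaii's 16 tile pipes. */
    static int pick_pipe_config(int max_tile_pipes)
    {
            int num_pipe_configs = max_tile_pipes;

            if (num_pipe_configs > 8)
                    num_pipe_configs = 16;
            return num_pipe_configs;
    }

    int main(void)
    {
            printf("bonaire(4) -> %d, hawaii(16) -> %d\n",
                   pick_pipe_config(4), pick_pipe_config(16));
            return 0;
    }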
@@ -2650,7 +3096,10 @@ static void cik_setup_rb(struct radeon_device *rdev,
2650 for (j = 0; j < sh_per_se; j++) { 3096 for (j = 0; j < sh_per_se; j++) {
2651 cik_select_se_sh(rdev, i, j); 3097 cik_select_se_sh(rdev, i, j);
2652 data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); 3098 data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
2653 disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH); 3099 if (rdev->family == CHIP_HAWAII)
3100 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
3101 else
3102 disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
2654 } 3103 }
2655 } 3104 }
2656 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 3105 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
@@ -2667,6 +3116,12 @@ static void cik_setup_rb(struct radeon_device *rdev,
2667 data = 0; 3116 data = 0;
2668 for (j = 0; j < sh_per_se; j++) { 3117 for (j = 0; j < sh_per_se; j++) {
2669 switch (enabled_rbs & 3) { 3118 switch (enabled_rbs & 3) {
3119 case 0:
3120 if (j == 0)
3121 data |= PKR_MAP(RASTER_CONFIG_RB_MAP_3);
3122 else
3123 data |= PKR_MAP(RASTER_CONFIG_RB_MAP_0);
3124 break;
2670 case 1: 3125 case 1:
2671 data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); 3126 data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
2672 break; 3127 break;
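The cik_setup_rb() change packs each shader array's disabled-RB mask into a fixed-width field: 2 bits per SH on earlier CIK parts, 4 bits on Hawaii (both constants appear in the cikd.h hunk further down). A standalone sketch of the packing:

    #include <stdint.h>
    #include <stdio.h>

    #define CIK_RB_BITMAP_WIDTH_PER_SH    2
    #define HAWAII_RB_BITMAP_WIDTH_PER_SH 4

    /* Each SE/SH slot contributes a fixed-width field to the disabled-RB
     * bitmap; Hawaii's 4 RBs per SH need 4 bits per slot. */
    static uint32_t pack_disabled_rbs(const uint32_t *data, int slots, int width)
    {
            uint32_t disabled_rbs = 0;
            int i;

            for (i = 0; i < slots; i++)
                    disabled_rbs |= data[i] << (i * width);
            return disabled_rbs;
    }

    int main(void)
    {
            uint32_t per_slot[4] = { 0x1, 0x0, 0x2, 0x0 };

            printf("cik:    0x%08x\n",
                   pack_disabled_rbs(per_slot, 4, CIK_RB_BITMAP_WIDTH_PER_SH));
            printf("hawaii: 0x%08x\n",
                   pack_disabled_rbs(per_slot, 4, HAWAII_RB_BITMAP_WIDTH_PER_SH));
            return 0;
    }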
@@ -2719,6 +3174,23 @@ static void cik_gpu_init(struct radeon_device *rdev)
2719 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130; 3174 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
2720 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; 3175 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
2721 break; 3176 break;
3177 case CHIP_HAWAII:
3178 rdev->config.cik.max_shader_engines = 4;
3179 rdev->config.cik.max_tile_pipes = 16;
3180 rdev->config.cik.max_cu_per_sh = 11;
3181 rdev->config.cik.max_sh_per_se = 1;
3182 rdev->config.cik.max_backends_per_se = 4;
3183 rdev->config.cik.max_texture_channel_caches = 16;
3184 rdev->config.cik.max_gprs = 256;
3185 rdev->config.cik.max_gs_threads = 32;
3186 rdev->config.cik.max_hw_contexts = 8;
3187
3188 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
3189 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
3190 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
3191 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
3192 gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
3193 break;
2722 case CHIP_KAVERI: 3194 case CHIP_KAVERI:
2723 rdev->config.cik.max_shader_engines = 1; 3195 rdev->config.cik.max_shader_engines = 1;
2724 rdev->config.cik.max_tile_pipes = 4; 3196 rdev->config.cik.max_tile_pipes = 4;
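For reference, the Hawaii configuration above works out to 4 shader engines x 1 shader array x 11 CUs = 44 compute units and 4 x 4 = 16 render backends, matching the 16 tile pipes:

    #include <stdio.h>

    int main(void)
    {
            int shader_engines = 4, sh_per_se = 1, cu_per_sh = 11, backends_per_se = 4;

            printf("CUs: %d\n", shader_engines * sh_per_se * cu_per_sh);   /* 44 */
            printf("RBs: %d\n", shader_engines * backends_per_se);         /* 16 */
            return 0;
    }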
@@ -3097,6 +3569,85 @@ void cik_semaphore_ring_emit(struct radeon_device *rdev,
3097 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); 3569 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
3098} 3570}
3099 3571
3572/**
3573 * cik_copy_cpdma - copy pages using the CP DMA engine
3574 *
3575 * @rdev: radeon_device pointer
3576 * @src_offset: src GPU address
3577 * @dst_offset: dst GPU address
3578 * @num_gpu_pages: number of GPU pages to xfer
3579 * @fence: radeon fence object
3580 *
 3581 * Copy GPU pages using the CP DMA engine (CIK+).
3582 * Used by the radeon ttm implementation to move pages if
3583 * registered as the asic copy callback.
3584 */
3585int cik_copy_cpdma(struct radeon_device *rdev,
3586 uint64_t src_offset, uint64_t dst_offset,
3587 unsigned num_gpu_pages,
3588 struct radeon_fence **fence)
3589{
3590 struct radeon_semaphore *sem = NULL;
3591 int ring_index = rdev->asic->copy.blit_ring_index;
3592 struct radeon_ring *ring = &rdev->ring[ring_index];
3593 u32 size_in_bytes, cur_size_in_bytes, control;
3594 int i, num_loops;
3595 int r = 0;
3596
3597 r = radeon_semaphore_create(rdev, &sem);
3598 if (r) {
3599 DRM_ERROR("radeon: moving bo (%d).\n", r);
3600 return r;
3601 }
3602
3603 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
3604 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
3605 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
3606 if (r) {
3607 DRM_ERROR("radeon: moving bo (%d).\n", r);
3608 radeon_semaphore_free(rdev, &sem, NULL);
3609 return r;
3610 }
3611
3612 if (radeon_fence_need_sync(*fence, ring->idx)) {
3613 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3614 ring->idx);
3615 radeon_fence_note_sync(*fence, ring->idx);
3616 } else {
3617 radeon_semaphore_free(rdev, &sem, NULL);
3618 }
3619
3620 for (i = 0; i < num_loops; i++) {
3621 cur_size_in_bytes = size_in_bytes;
3622 if (cur_size_in_bytes > 0x1fffff)
3623 cur_size_in_bytes = 0x1fffff;
3624 size_in_bytes -= cur_size_in_bytes;
3625 control = 0;
3626 if (size_in_bytes == 0)
3627 control |= PACKET3_DMA_DATA_CP_SYNC;
3628 radeon_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
3629 radeon_ring_write(ring, control);
3630 radeon_ring_write(ring, lower_32_bits(src_offset));
3631 radeon_ring_write(ring, upper_32_bits(src_offset));
3632 radeon_ring_write(ring, lower_32_bits(dst_offset));
3633 radeon_ring_write(ring, upper_32_bits(dst_offset));
3634 radeon_ring_write(ring, cur_size_in_bytes);
3635 src_offset += cur_size_in_bytes;
3636 dst_offset += cur_size_in_bytes;
3637 }
3638
3639 r = radeon_fence_emit(rdev, fence, ring->idx);
3640 if (r) {
3641 radeon_ring_unlock_undo(rdev, ring);
3642 return r;
3643 }
3644
3645 radeon_ring_unlock_commit(rdev, ring);
3646 radeon_semaphore_free(rdev, &sem, *fence);
3647
3648 return r;
3649}
3650
3100/* 3651/*
3101 * IB stuff 3652 * IB stuff
3102 */ 3653 */
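cik_copy_cpdma() above splits a copy into DMA_DATA packets of at most 0x1fffff bytes each, matching the 21-bit BYTE_COUNT field documented in the cikd.h hunk below, and reserves 7 ring dwords per packet plus 18 for the semaphore sync and fence. A standalone sketch of the sizing arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define RADEON_GPU_PAGE_SHIFT 12          /* 4 KiB GPU pages */
    #define DMA_DATA_MAX_BYTES    0x1fffff    /* 21-bit BYTE_COUNT field */
    #define DIV_ROUND_UP(n, d)    (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned num_gpu_pages = 2048;    /* an 8 MiB copy */
            uint32_t size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
            int num_loops = DIV_ROUND_UP(size, DMA_DATA_MAX_BYTES);

            /* 7 ring dwords per DMA_DATA packet; 18 covers sync + fence */
            printf("loops=%d, ring dwords reserved=%d\n",
                   num_loops, num_loops * 7 + 18);
            return 0;
    }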
@@ -3403,7 +3954,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
3403 int r; 3954 int r;
3404 3955
3405 WREG32(CP_SEM_WAIT_TIMER, 0x0); 3956 WREG32(CP_SEM_WAIT_TIMER, 0x0);
3406 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 3957 if (rdev->family != CHIP_HAWAII)
3958 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3407 3959
3408 /* Set the write pointer delay */ 3960 /* Set the write pointer delay */
3409 WREG32(CP_RB_WPTR_DELAY, 0); 3961 WREG32(CP_RB_WPTR_DELAY, 0);
@@ -4740,12 +5292,17 @@ void cik_vm_fini(struct radeon_device *rdev)
4740static void cik_vm_decode_fault(struct radeon_device *rdev, 5292static void cik_vm_decode_fault(struct radeon_device *rdev,
4741 u32 status, u32 addr, u32 mc_client) 5293 u32 status, u32 addr, u32 mc_client)
4742{ 5294{
4743 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; 5295 u32 mc_id;
4744 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; 5296 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
4745 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; 5297 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
4746 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, 5298 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
4747 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; 5299 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
4748 5300
5301 if (rdev->family == CHIP_HAWAII)
5302 mc_id = (status & HAWAII_MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
5303 else
5304 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
5305
4749 printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", 5306 printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
4750 protections, vmid, addr, 5307 protections, vmid, addr,
4751 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read", 5308 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
@@ -4834,62 +5391,6 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4834 } 5391 }
4835} 5392}
4836 5393
4837/**
4838 * cik_vm_set_page - update the page tables using sDMA
4839 *
4840 * @rdev: radeon_device pointer
4841 * @ib: indirect buffer to fill with commands
4842 * @pe: addr of the page entry
4843 * @addr: dst addr to write into pe
4844 * @count: number of page entries to update
4845 * @incr: increase next addr by incr bytes
4846 * @flags: access flags
4847 *
4848 * Update the page tables using CP or sDMA (CIK).
4849 */
4850void cik_vm_set_page(struct radeon_device *rdev,
4851 struct radeon_ib *ib,
4852 uint64_t pe,
4853 uint64_t addr, unsigned count,
4854 uint32_t incr, uint32_t flags)
4855{
4856 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
4857 uint64_t value;
4858 unsigned ndw;
4859
4860 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
4861 /* CP */
4862 while (count) {
4863 ndw = 2 + count * 2;
4864 if (ndw > 0x3FFE)
4865 ndw = 0x3FFE;
4866
4867 ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
4868 ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
4869 WRITE_DATA_DST_SEL(1));
4870 ib->ptr[ib->length_dw++] = pe;
4871 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
4872 for (; ndw > 2; ndw -= 2, --count, pe += 8) {
4873 if (flags & RADEON_VM_PAGE_SYSTEM) {
4874 value = radeon_vm_map_gart(rdev, addr);
4875 value &= 0xFFFFFFFFFFFFF000ULL;
4876 } else if (flags & RADEON_VM_PAGE_VALID) {
4877 value = addr;
4878 } else {
4879 value = 0;
4880 }
4881 addr += incr;
4882 value |= r600_flags;
4883 ib->ptr[ib->length_dw++] = value;
4884 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4885 }
4886 }
4887 } else {
4888 /* DMA */
4889 cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
4890 }
4891}
4892
4893/* 5394/*
4894 * RLC 5395 * RLC
4895 * The RLC is a multi-purpose microengine that handles a 5396 * The RLC is a multi-purpose microengine that handles a
@@ -5058,6 +5559,7 @@ static int cik_rlc_resume(struct radeon_device *rdev)
5058 5559
5059 switch (rdev->family) { 5560 switch (rdev->family) {
5060 case CHIP_BONAIRE: 5561 case CHIP_BONAIRE:
5562 case CHIP_HAWAII:
5061 default: 5563 default:
5062 size = BONAIRE_RLC_UCODE_SIZE; 5564 size = BONAIRE_RLC_UCODE_SIZE;
5063 break; 5565 break;
@@ -5556,7 +6058,7 @@ void cik_init_cp_pg_table(struct radeon_device *rdev)
5556 } 6058 }
5557 6059
5558 for (i = 0; i < CP_ME_TABLE_SIZE; i ++) { 6060 for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
5559 dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]); 6061 dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
5560 } 6062 }
5561 bo_offset += CP_ME_TABLE_SIZE; 6063 bo_offset += CP_ME_TABLE_SIZE;
5562 } 6064 }
@@ -5778,52 +6280,57 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
5778 if (buffer == NULL) 6280 if (buffer == NULL)
5779 return; 6281 return;
5780 6282
5781 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0); 6283 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
5782 buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE; 6284 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
5783 6285
5784 buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1); 6286 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5785 buffer[count++] = 0x80000000; 6287 buffer[count++] = cpu_to_le32(0x80000000);
5786 buffer[count++] = 0x80000000; 6288 buffer[count++] = cpu_to_le32(0x80000000);
5787 6289
5788 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { 6290 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5789 for (ext = sect->section; ext->extent != NULL; ++ext) { 6291 for (ext = sect->section; ext->extent != NULL; ++ext) {
5790 if (sect->id == SECT_CONTEXT) { 6292 if (sect->id == SECT_CONTEXT) {
5791 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count); 6293 buffer[count++] =
5792 buffer[count++] = ext->reg_index - 0xa000; 6294 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
6295 buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
5793 for (i = 0; i < ext->reg_count; i++) 6296 for (i = 0; i < ext->reg_count; i++)
5794 buffer[count++] = ext->extent[i]; 6297 buffer[count++] = cpu_to_le32(ext->extent[i]);
5795 } else { 6298 } else {
5796 return; 6299 return;
5797 } 6300 }
5798 } 6301 }
5799 } 6302 }
5800 6303
5801 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2); 6304 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
5802 buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START; 6305 buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
5803 switch (rdev->family) { 6306 switch (rdev->family) {
5804 case CHIP_BONAIRE: 6307 case CHIP_BONAIRE:
5805 buffer[count++] = 0x16000012; 6308 buffer[count++] = cpu_to_le32(0x16000012);
5806 buffer[count++] = 0x00000000; 6309 buffer[count++] = cpu_to_le32(0x00000000);
5807 break; 6310 break;
5808 case CHIP_KAVERI: 6311 case CHIP_KAVERI:
5809 buffer[count++] = 0x00000000; /* XXX */ 6312 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
5810 buffer[count++] = 0x00000000; 6313 buffer[count++] = cpu_to_le32(0x00000000);
5811 break; 6314 break;
5812 case CHIP_KABINI: 6315 case CHIP_KABINI:
5813 buffer[count++] = 0x00000000; /* XXX */ 6316 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
5814 buffer[count++] = 0x00000000; 6317 buffer[count++] = cpu_to_le32(0x00000000);
6318 break;
6319 case CHIP_HAWAII:
 6320 buffer[count++] = cpu_to_le32(0x3a00161a);
 6321 buffer[count++] = cpu_to_le32(0x0000002e);
5815 break; 6322 break;
5816 default: 6323 default:
5817 buffer[count++] = 0x00000000; 6324 buffer[count++] = cpu_to_le32(0x00000000);
5818 buffer[count++] = 0x00000000; 6325 buffer[count++] = cpu_to_le32(0x00000000);
5819 break; 6326 break;
5820 } 6327 }
5821 6328
5822 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0); 6329 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
5823 buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE; 6330 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
5824 6331
5825 buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0); 6332 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
5826 buffer[count++] = 0; 6333 buffer[count++] = cpu_to_le32(0);
5827} 6334}
5828 6335
5829static void cik_init_pg(struct radeon_device *rdev) 6336static void cik_init_pg(struct radeon_device *rdev)
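The cpu_to_le32() wrapping added throughout cik_get_csb_buffer() exists because the CP consumes the clear-state buffer as little-endian data, so big-endian hosts must byte-swap each dword; on little-endian hosts the macro compiles away. A portable standalone sketch of the conversion:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t to_le32(uint32_t v)
    {
            union { uint16_t u16; uint8_t u8[2]; } probe = { .u16 = 1 };

            if (probe.u8[0])      /* little-endian host: nothing to do */
                    return v;
            return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
                   ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
    }

    int main(void)
    {
            printf("0x%08x\n", to_le32(0x80000000u));
            return 0;
    }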
@@ -7118,7 +7625,7 @@ static int cik_startup(struct radeon_device *rdev)
7118 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 7625 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
7119 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 7626 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
7120 CP_RB0_RPTR, CP_RB0_WPTR, 7627 CP_RB0_RPTR, CP_RB0_WPTR,
7121 RADEON_CP_PACKET2); 7628 PACKET3(PACKET3_NOP, 0x3FFF));
7122 if (r) 7629 if (r)
7123 return r; 7630 return r;
7124 7631
@@ -7428,6 +7935,70 @@ void cik_fini(struct radeon_device *rdev)
7428 rdev->bios = NULL; 7935 rdev->bios = NULL;
7429} 7936}
7430 7937
7938void dce8_program_fmt(struct drm_encoder *encoder)
7939{
7940 struct drm_device *dev = encoder->dev;
7941 struct radeon_device *rdev = dev->dev_private;
7942 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
7943 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
7944 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
7945 int bpc = 0;
7946 u32 tmp = 0;
7947 enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
7948
7949 if (connector) {
7950 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
7951 bpc = radeon_get_monitor_bpc(connector);
7952 dither = radeon_connector->dither;
7953 }
7954
7955 /* LVDS/eDP FMT is set up by atom */
7956 if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
7957 return;
7958
7959 /* not needed for analog */
7960 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
7961 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
7962 return;
7963
7964 if (bpc == 0)
7965 return;
7966
7967 switch (bpc) {
7968 case 6:
7969 if (dither == RADEON_FMT_DITHER_ENABLE)
7970 /* XXX sort out optimal dither settings */
7971 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
7972 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(0));
7973 else
7974 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(0));
7975 break;
7976 case 8:
7977 if (dither == RADEON_FMT_DITHER_ENABLE)
7978 /* XXX sort out optimal dither settings */
7979 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
7980 FMT_RGB_RANDOM_ENABLE |
7981 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(1));
7982 else
7983 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(1));
7984 break;
7985 case 10:
7986 if (dither == RADEON_FMT_DITHER_ENABLE)
7987 /* XXX sort out optimal dither settings */
7988 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
7989 FMT_RGB_RANDOM_ENABLE |
7990 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(2));
7991 else
7992 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(2));
7993 break;
7994 default:
7995 /* not needed */
7996 break;
7997 }
7998
7999 WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
8000}
8001
7431/* display watermark setup */ 8002/* display watermark setup */
7432/** 8003/**
7433 * dce8_line_buffer_adjust - Set up the line buffer 8004 * dce8_line_buffer_adjust - Set up the line buffer
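In dce8_program_fmt() above, the FMT_*_DEPTH fields encode the target depth as 0/1/2 for 18/24/30 bpp panels (6/8/10 bits per component), per the field comments in the cikd.h hunk below. A standalone sketch of the bpc-to-depth mapping:

    #include <stdio.h>

    static int fmt_depth_code(int bpc)
    {
            switch (bpc) {
            case 6:  return 0;    /* 18 bpp */
            case 8:  return 1;    /* 24 bpp */
            case 10: return 2;    /* 30 bpp */
            default: return -1;   /* leave the FMT block untouched */
            }
    }

    int main(void)
    {
            printf("bpc 8 -> depth code %d\n", fmt_depth_code(8));
            return 0;
    }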
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index b6286068e111..9c9529de20ee 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -25,6 +25,7 @@
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h" 27#include "radeon_asic.h"
28#include "radeon_trace.h"
28#include "cikd.h" 29#include "cikd.h"
29 30
30/* sdma */ 31/* sdma */
@@ -101,14 +102,6 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
101{ 102{
102 struct radeon_ring *ring = &rdev->ring[fence->ring]; 103 struct radeon_ring *ring = &rdev->ring[fence->ring];
103 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; 104 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
104 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
105 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
106 u32 ref_and_mask;
107
108 if (fence->ring == R600_RING_TYPE_DMA_INDEX)
109 ref_and_mask = SDMA0;
110 else
111 ref_and_mask = SDMA1;
112 105
113 /* write the fence */ 106 /* write the fence */
114 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0)); 107 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
@@ -118,12 +111,12 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
118 /* generate an interrupt */ 111 /* generate an interrupt */
119 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0)); 112 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
120 /* flush HDP */ 113 /* flush HDP */
121 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); 114 /* We should be using the new POLL_REG_MEM special op packet here
122 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE); 115 * but it causes sDMA to hang sometimes
123 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ); 116 */
124 radeon_ring_write(ring, ref_and_mask); /* REFERENCE */ 117 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
125 radeon_ring_write(ring, ref_and_mask); /* MASK */ 118 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
126 radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */ 119 radeon_ring_write(ring, 0);
127} 120}
128 121
129/** 122/**
@@ -653,11 +646,12 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
653 uint64_t addr, unsigned count, 646 uint64_t addr, unsigned count,
654 uint32_t incr, uint32_t flags) 647 uint32_t incr, uint32_t flags)
655{ 648{
656 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
657 uint64_t value; 649 uint64_t value;
658 unsigned ndw; 650 unsigned ndw;
659 651
660 if (flags & RADEON_VM_PAGE_SYSTEM) { 652 trace_radeon_vm_set_page(pe, addr, count, incr, flags);
653
654 if (flags & R600_PTE_SYSTEM) {
661 while (count) { 655 while (count) {
662 ndw = count * 2; 656 ndw = count * 2;
663 if (ndw > 0xFFFFE) 657 if (ndw > 0xFFFFE)
@@ -669,16 +663,10 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
669 ib->ptr[ib->length_dw++] = upper_32_bits(pe); 663 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
670 ib->ptr[ib->length_dw++] = ndw; 664 ib->ptr[ib->length_dw++] = ndw;
671 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 665 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
672 if (flags & RADEON_VM_PAGE_SYSTEM) { 666 value = radeon_vm_map_gart(rdev, addr);
673 value = radeon_vm_map_gart(rdev, addr); 667 value &= 0xFFFFFFFFFFFFF000ULL;
674 value &= 0xFFFFFFFFFFFFF000ULL;
675 } else if (flags & RADEON_VM_PAGE_VALID) {
676 value = addr;
677 } else {
678 value = 0;
679 }
680 addr += incr; 668 addr += incr;
681 value |= r600_flags; 669 value |= flags;
682 ib->ptr[ib->length_dw++] = value; 670 ib->ptr[ib->length_dw++] = value;
683 ib->ptr[ib->length_dw++] = upper_32_bits(value); 671 ib->ptr[ib->length_dw++] = upper_32_bits(value);
684 } 672 }
@@ -689,7 +677,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
689 if (ndw > 0x7FFFF) 677 if (ndw > 0x7FFFF)
690 ndw = 0x7FFFF; 678 ndw = 0x7FFFF;
691 679
692 if (flags & RADEON_VM_PAGE_VALID) 680 if (flags & R600_PTE_VALID)
693 value = addr; 681 value = addr;
694 else 682 else
695 value = 0; 683 value = 0;
@@ -697,7 +685,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
697 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); 685 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
698 ib->ptr[ib->length_dw++] = pe; /* dst addr */ 686 ib->ptr[ib->length_dw++] = pe; /* dst addr */
699 ib->ptr[ib->length_dw++] = upper_32_bits(pe); 687 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
700 ib->ptr[ib->length_dw++] = r600_flags; /* mask */ 688 ib->ptr[ib->length_dw++] = flags; /* mask */
701 ib->ptr[ib->length_dw++] = 0; 689 ib->ptr[ib->length_dw++] = 0;
702 ib->ptr[ib->length_dw++] = value; /* value */ 690 ib->ptr[ib->length_dw++] = value; /* value */
703 ib->ptr[ib->length_dw++] = upper_32_bits(value); 691 ib->ptr[ib->length_dw++] = upper_32_bits(value);
@@ -724,18 +712,10 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
724void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 712void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
725{ 713{
726 struct radeon_ring *ring = &rdev->ring[ridx]; 714 struct radeon_ring *ring = &rdev->ring[ridx];
727 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
728 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
729 u32 ref_and_mask;
730 715
731 if (vm == NULL) 716 if (vm == NULL)
732 return; 717 return;
733 718
734 if (ridx == R600_RING_TYPE_DMA_INDEX)
735 ref_and_mask = SDMA0;
736 else
737 ref_and_mask = SDMA1;
738
739 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); 719 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
740 if (vm->id < 8) { 720 if (vm->id < 8) {
741 radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); 721 radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
@@ -770,12 +750,12 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
770 radeon_ring_write(ring, VMID(0)); 750 radeon_ring_write(ring, VMID(0));
771 751
772 /* flush HDP */ 752 /* flush HDP */
773 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); 753 /* We should be using the new POLL_REG_MEM special op packet here
774 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE); 754 * but it causes sDMA to hang sometimes
775 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ); 755 */
776 radeon_ring_write(ring, ref_and_mask); /* REFERENCE */ 756 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
777 radeon_ring_write(ring, ref_and_mask); /* MASK */ 757 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
778 radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */ 758 radeon_ring_write(ring, 0);
779 759
780 /* flush TLB */ 760 /* flush TLB */
781 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); 761 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
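Both cik_sdma.c hunks above swap the POLL_REG_MEM-based HDP flush (which the new comment says can hang the sDMA engine) for a plain SRBM write to HDP_MEM_COHERENCY_FLUSH_CNTL. A standalone sketch of the three-dword sequence; the opcode value, packet encoding, and register offset are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define SDMA_OPCODE_SRBM_WRITE 0xe    /* assumed */
    #define SDMA_PACKET(op, sub, e) ((((e) & 0xffffu) << 16) | \
                                     (((sub) & 0xffu) << 8) | ((op) & 0xffu))
    #define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480   /* assumed */

    int main(void)
    {
            uint32_t pkt[3] = {
                    SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000),
                    HDP_MEM_COHERENCY_FLUSH_CNTL >> 2,  /* dword register index */
                    0,                                  /* value: kick the flush */
            };

            printf("%08x %08x %08x\n", pkt[0], pkt[1], pkt[2]);
            return 0;
    }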
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 203d2a09a1f5..5964af5e5b2d 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -25,8 +25,10 @@
25#define CIK_H 25#define CIK_H
26 26
27#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001 27#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
28#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
28 29
29#define CIK_RB_BITMAP_WIDTH_PER_SH 2 30#define CIK_RB_BITMAP_WIDTH_PER_SH 2
31#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4
30 32
31/* DIDT IND registers */ 33/* DIDT IND registers */
32#define DIDT_SQ_CTRL0 0x0 34#define DIDT_SQ_CTRL0 0x0
@@ -499,6 +501,7 @@
499 * bit 4: write 501 * bit 4: write
500 */ 502 */
501#define MEMORY_CLIENT_ID_MASK (0xff << 12) 503#define MEMORY_CLIENT_ID_MASK (0xff << 12)
504#define HAWAII_MEMORY_CLIENT_ID_MASK (0x1ff << 12)
502#define MEMORY_CLIENT_ID_SHIFT 12 505#define MEMORY_CLIENT_ID_SHIFT 12
503#define MEMORY_CLIENT_RW_MASK (1 << 24) 506#define MEMORY_CLIENT_RW_MASK (1 << 24)
504#define MEMORY_CLIENT_RW_SHIFT 24 507#define MEMORY_CLIENT_RW_SHIFT 24
@@ -906,6 +909,39 @@
906#define DPG_PIPE_STUTTER_CONTROL 0x6cd4 909#define DPG_PIPE_STUTTER_CONTROL 0x6cd4
907# define STUTTER_ENABLE (1 << 0) 910# define STUTTER_ENABLE (1 << 0)
908 911
912/* DCE8 FMT blocks */
913#define FMT_DYNAMIC_EXP_CNTL 0x6fb4
914# define FMT_DYNAMIC_EXP_EN (1 << 0)
915# define FMT_DYNAMIC_EXP_MODE (1 << 4)
916 /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
917#define FMT_CONTROL 0x6fb8
918# define FMT_PIXEL_ENCODING (1 << 16)
919 /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
920#define FMT_BIT_DEPTH_CONTROL 0x6fc8
921# define FMT_TRUNCATE_EN (1 << 0)
922# define FMT_TRUNCATE_MODE (1 << 1)
923# define FMT_TRUNCATE_DEPTH(x) ((x) << 4) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
924# define FMT_SPATIAL_DITHER_EN (1 << 8)
925# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
926# define FMT_SPATIAL_DITHER_DEPTH(x) ((x) << 11) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
927# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
928# define FMT_RGB_RANDOM_ENABLE (1 << 14)
929# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
930# define FMT_TEMPORAL_DITHER_EN (1 << 16)
931# define FMT_TEMPORAL_DITHER_DEPTH(x) ((x) << 17) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
932# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
933# define FMT_TEMPORAL_LEVEL (1 << 24)
934# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
935# define FMT_25FRC_SEL(x) ((x) << 26)
936# define FMT_50FRC_SEL(x) ((x) << 28)
937# define FMT_75FRC_SEL(x) ((x) << 30)
938#define FMT_CLAMP_CONTROL 0x6fe4
939# define FMT_CLAMP_DATA_EN (1 << 0)
940# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
941# define FMT_CLAMP_6BPC 0
942# define FMT_CLAMP_8BPC 1
943# define FMT_CLAMP_10BPC 2
944
909#define GRBM_CNTL 0x8000 945#define GRBM_CNTL 0x8000
910#define GRBM_READ_TIMEOUT(x) ((x) << 0) 946#define GRBM_READ_TIMEOUT(x) ((x) << 0)
911 947
@@ -1129,6 +1165,8 @@
1129# define ADDR_SURF_P8_32x32_16x16 12 1165# define ADDR_SURF_P8_32x32_16x16 12
1130# define ADDR_SURF_P8_32x32_16x32 13 1166# define ADDR_SURF_P8_32x32_16x32 13
1131# define ADDR_SURF_P8_32x64_32x32 14 1167# define ADDR_SURF_P8_32x64_32x32 14
1168# define ADDR_SURF_P16_32x32_8x16 16
1169# define ADDR_SURF_P16_32x32_16x16 17
1132# define TILE_SPLIT(x) ((x) << 11) 1170# define TILE_SPLIT(x) ((x) << 11)
1133# define ADDR_SURF_TILE_SPLIT_64B 0 1171# define ADDR_SURF_TILE_SPLIT_64B 0
1134# define ADDR_SURF_TILE_SPLIT_128B 1 1172# define ADDR_SURF_TILE_SPLIT_128B 1
@@ -1422,6 +1460,7 @@
1422# define RASTER_CONFIG_RB_MAP_1 1 1460# define RASTER_CONFIG_RB_MAP_1 1
1423# define RASTER_CONFIG_RB_MAP_2 2 1461# define RASTER_CONFIG_RB_MAP_2 2
1424# define RASTER_CONFIG_RB_MAP_3 3 1462# define RASTER_CONFIG_RB_MAP_3 3
1463#define PKR_MAP(x) ((x) << 8)
1425 1464
1426#define VGT_EVENT_INITIATOR 0x28a90 1465#define VGT_EVENT_INITIATOR 0x28a90
1427# define SAMPLE_STREAMOUTSTATS1 (1 << 0) 1466# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
@@ -1714,6 +1753,68 @@
1714# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28) 1753# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
1715# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28) 1754# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
1716#define PACKET3_DMA_DATA 0x50 1755#define PACKET3_DMA_DATA 0x50
1756/* 1. header
1757 * 2. CONTROL
1758 * 3. SRC_ADDR_LO or DATA [31:0]
1759 * 4. SRC_ADDR_HI [31:0]
1760 * 5. DST_ADDR_LO [31:0]
1761 * 6. DST_ADDR_HI [7:0]
1762 * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
1763 */
1764/* CONTROL */
1765# define PACKET3_DMA_DATA_ENGINE(x) ((x) << 0)
1766 /* 0 - ME
1767 * 1 - PFP
1768 */
1769# define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
1770 /* 0 - LRU
1771 * 1 - Stream
1772 * 2 - Bypass
1773 */
1774# define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
1775# define PACKET3_DMA_DATA_DST_SEL(x) ((x) << 20)
1776 /* 0 - DST_ADDR using DAS
1777 * 1 - GDS
1778 * 3 - DST_ADDR using L2
1779 */
1780# define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
1781 /* 0 - LRU
1782 * 1 - Stream
1783 * 2 - Bypass
1784 */
1785# define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
1786# define PACKET3_DMA_DATA_SRC_SEL(x) ((x) << 29)
1787 /* 0 - SRC_ADDR using SAS
1788 * 1 - GDS
1789 * 2 - DATA
1790 * 3 - SRC_ADDR using L2
1791 */
1792# define PACKET3_DMA_DATA_CP_SYNC (1 << 31)
1793/* COMMAND */
1794# define PACKET3_DMA_DATA_DIS_WC (1 << 21)
1795# define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
1796 /* 0 - none
1797 * 1 - 8 in 16
1798 * 2 - 8 in 32
1799 * 3 - 8 in 64
1800 */
1801# define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
1802 /* 0 - none
1803 * 1 - 8 in 16
1804 * 2 - 8 in 32
1805 * 3 - 8 in 64
1806 */
1807# define PACKET3_DMA_DATA_CMD_SAS (1 << 26)
1808 /* 0 - memory
1809 * 1 - register
1810 */
1811# define PACKET3_DMA_DATA_CMD_DAS (1 << 27)
1812 /* 0 - memory
1813 * 1 - register
1814 */
1815# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
1816# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
1817# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
1717#define PACKET3_AQUIRE_MEM 0x58 1818#define PACKET3_AQUIRE_MEM 0x58
1718#define PACKET3_REWIND 0x59 1819#define PACKET3_REWIND 0x59
1719#define PACKET3_LOAD_UCONFIG_REG 0x5E 1820#define PACKET3_LOAD_UCONFIG_REG 0x5E
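Tying the cikd.h layout comment above back to cik_copy_cpdma(): a DMA_DATA transfer is 7 dwords (header, CONTROL, SRC lo/hi, DST lo/hi, COMMAND|BYTE_COUNT). A hypothetical standalone builder; the PM4 type-3 header packing is assumed from the driver's PACKET3() convention:

    #include <stdint.h>

    #define PACKET3(op, n)  ((3u << 30) | (((n) & 0x3fffu) << 16) | \
                             (((op) & 0xffu) << 8))      /* assumed layout */
    #define PACKET3_DMA_DATA         0x50
    #define PACKET3_DMA_DATA_CP_SYNC (1u << 31)

    static int emit_dma_data(uint32_t *pkt, uint64_t src, uint64_t dst,
                             uint32_t bytes, int last)
    {
            pkt[0] = PACKET3(PACKET3_DMA_DATA, 5);         /* 6 body dwords - 1 */
            pkt[1] = last ? PACKET3_DMA_DATA_CP_SYNC : 0;  /* CONTROL */
            pkt[2] = (uint32_t)src;                        /* SRC_ADDR_LO */
            pkt[3] = (uint32_t)(src >> 32);                /* SRC_ADDR_HI */
            pkt[4] = (uint32_t)dst;                        /* DST_ADDR_LO */
            pkt[5] = (uint32_t)(dst >> 32);                /* DST_ADDR_HI */
            pkt[6] = bytes & 0x1fffff;                     /* BYTE_COUNT [20:0] */
            return 7;
    }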
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 9fcd338c0fcf..009f46e0ce72 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -102,6 +102,49 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
 			AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
 }
 
+void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+				    struct drm_display_mode *mode)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector = NULL;
+	u32 tmp = 0, offset;
+
+	if (!dig->afmt->pin)
+		return;
+
+	offset = dig->afmt->pin->offset;
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
+	}
+
+	if (!radeon_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		if (connector->latency_present[1])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+				AUDIO_LIPSYNC(connector->audio_latency[1]);
+		else
+			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+	} else {
+		if (connector->latency_present[0])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
+				AUDIO_LIPSYNC(connector->audio_latency[0]);
+		else
+			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+	}
+	WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
 void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 {
 	struct radeon_device *rdev = encoder->dev->dev_private;
@@ -113,9 +156,6 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 	u8 *sadb;
 	int sad_count;
 
-	/* XXX: setting this register causes hangs on some asics */
-	return;
-
 	if (!dig->afmt->pin)
 		return;
 
@@ -201,20 +241,30 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
 
 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
 		u32 value = 0;
+		u8 stereo_freqs = 0;
+		int max_channels = -1;
 		int j;
 
 		for (j = 0; j < sad_count; j++) {
 			struct cea_sad *sad = &sads[j];
 
 			if (sad->format == eld_reg_to_type[i][1]) {
-				value = MAX_CHANNELS(sad->channels) |
-					DESCRIPTOR_BYTE_2(sad->byte2) |
-					SUPPORTED_FREQUENCIES(sad->freq);
+				if (sad->channels > max_channels) {
+					value = MAX_CHANNELS(sad->channels) |
+						DESCRIPTOR_BYTE_2(sad->byte2) |
+						SUPPORTED_FREQUENCIES(sad->freq);
+					max_channels = sad->channels;
+				}
+
 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
-					value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
-				break;
+					stereo_freqs |= sad->freq;
+				else
+					break;
 			}
 		}
+
+		value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
 		WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
 	}
 
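Two behavioral changes are worth noting in the hunk above: the value written per format now comes from the SAD with the highest channel count (previously the first match won), and for PCM the stereo frequency bits are accumulated across every matching descriptor rather than taken from a single one. A standalone sketch of the new selection rule (plain C; the struct is a simplified stand-in for struct cea_sad):

        struct sad { int format; int channels; unsigned freq; };

        /* keep the highest-channel descriptor, OR together PCM frequency bits */
        static void select_sad(const struct sad *sads, int count, int fmt,
                               int *best_idx, unsigned *stereo_freqs)
        {
                int j, max_channels = -1;

                *best_idx = -1;
                *stereo_freqs = 0;
                for (j = 0; j < count; j++) {
                        if (sads[j].format != fmt)
                                continue;
                        if (sads[j].channels > max_channels) {
                                *best_idx = j;
                                max_channels = sads[j].channels;
                        }
                        *stereo_freqs |= sads[j].freq; /* meaningful for PCM */
                }
        }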
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 56f6bec34af5..9702e55e924e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1186,6 +1186,62 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
 		pcie_set_readrq(rdev->pdev, 512);
 }
 
+void dce4_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS/eDP FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	/* not needed for analog */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN);
+		else
+			tmp |= FMT_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_RGB_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
+	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
 static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
 {
 	if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
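dce4_program_fmt() only touches FMT_BIT_DEPTH_CONTROL: 6- and 8-bpc sinks either get spatial/frame dithering or plain truncation down from the internal 10-bpc pipeline, while 10 bpc needs no processing. A hedged sketch of the expected call site (the surrounding loop is illustrative; in the driver this would run during CRTC mode set, once encoders are assigned):

        /* illustrative only: program the FMT block for every encoder on this crtc */
        static void example_program_fmt(struct drm_crtc *crtc)
        {
                struct drm_encoder *encoder;

                list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head) {
                        if (encoder->crtc == crtc)
                                dce4_program_fmt(encoder);
                }
        }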
@@ -3956,7 +4012,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
 	if (rdev->family >= CHIP_TAHITI) {
 		/* SI */
 		for (i = 0; i < rdev->rlc.reg_list_size; i++)
-			dst_ptr[i] = src_ptr[i];
+			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
 	} else {
 		/* ON/LN/TN */
 		/* format:
@@ -3970,10 +4026,10 @@ int sumo_rlc_init(struct radeon_device *rdev)
 			if (i < dws)
 				data |= (src_ptr[i] >> 2) << 16;
 			j = (((i - 1) * 3) / 2);
-			dst_ptr[j] = data;
+			dst_ptr[j] = cpu_to_le32(data);
 		}
 		j = ((i * 3) / 2);
-		dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
+		dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER);
 	}
 	radeon_bo_kunmap(rdev->rlc.save_restore_obj);
 	radeon_bo_unreserve(rdev->rlc.save_restore_obj);
@@ -4035,40 +4091,40 @@ int sumo_rlc_init(struct radeon_device *rdev)
 		cik_get_csb_buffer(rdev, dst_ptr);
 	} else if (rdev->family >= CHIP_TAHITI) {
 		reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
-		dst_ptr[0] = upper_32_bits(reg_list_mc_addr);
-		dst_ptr[1] = lower_32_bits(reg_list_mc_addr);
-		dst_ptr[2] = rdev->rlc.clear_state_size;
+		dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
+		dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
+		dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size);
 		si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
 	} else {
 		reg_list_hdr_blk_index = 0;
 		reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
 		data = upper_32_bits(reg_list_mc_addr);
-		dst_ptr[reg_list_hdr_blk_index] = data;
+		dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 		reg_list_hdr_blk_index++;
 		for (i = 0; cs_data[i].section != NULL; i++) {
 			for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
 				reg_num = cs_data[i].section[j].reg_count;
 				data = reg_list_mc_addr & 0xffffffff;
-				dst_ptr[reg_list_hdr_blk_index] = data;
+				dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 				reg_list_hdr_blk_index++;
 
 				data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
-				dst_ptr[reg_list_hdr_blk_index] = data;
+				dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 				reg_list_hdr_blk_index++;
 
 				data = 0x08000000 | (reg_num * 4);
-				dst_ptr[reg_list_hdr_blk_index] = data;
+				dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 				reg_list_hdr_blk_index++;
 
 				for (k = 0; k < reg_num; k++) {
 					data = cs_data[i].section[j].extent[k];
-					dst_ptr[reg_list_blk_index + k] = data;
+					dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data);
 				}
 				reg_list_mc_addr += reg_num * 4;
 				reg_list_blk_index += reg_num;
 			}
 		}
-		dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
+		dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER);
 	}
 	radeon_bo_kunmap(rdev->rlc.clear_state_obj);
 	radeon_bo_unreserve(rdev->rlc.clear_state_obj);
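The cpu_to_le32() wrappers added above are no-ops on little-endian hosts; on big-endian machines they byte-swap, which matters because the RLC fetches these save/restore and clear-state buffers as little-endian dwords. The pattern, reduced to a sketch:

        #include <asm/byteorder.h>

        /* GPU-visible dword buffers are little-endian regardless of the host */
        static void fill_gpu_dwords(__le32 *dst, const u32 *src, unsigned int n)
        {
                unsigned int i;

                for (i = 0; i < n; i++)
                        dst[i] = cpu_to_le32(src[i]); /* identity on LE, swap on BE */
        }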
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 57fcc4b16a52..aa695c4feb3d 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -35,6 +35,8 @@
 extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder);
 extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder);
 extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
+extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+					   struct drm_display_mode *mode);
 
 /*
  * update the N and CTS parameters for a given pixel clock rate
@@ -58,6 +60,42 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
 	WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
 }
 
+static void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
+					   struct drm_display_mode *mode)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector = NULL;
+	u32 tmp = 0;
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
+	}
+
+	if (!radeon_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		if (connector->latency_present[1])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+				AUDIO_LIPSYNC(connector->audio_latency[1]);
+		else
+			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+	} else {
+		if (connector->latency_present[0])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
+				AUDIO_LIPSYNC(connector->audio_latency[0]);
+		else
+			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+	}
+	WREG32(AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
 static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 {
 	struct radeon_device *rdev = encoder->dev->dev_private;
@@ -67,12 +105,11 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 	u8 *sadb;
 	int sad_count;
 
-	/* XXX: setting this register causes hangs on some asics */
-	return;
-
 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder)
+		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
 	}
 
 	if (!radeon_connector) {
@@ -124,8 +161,10 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
 	};
 
 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder)
+		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
 	}
 
 	if (!radeon_connector) {
@@ -142,20 +181,30 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
 
 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
 		u32 value = 0;
+		u8 stereo_freqs = 0;
+		int max_channels = -1;
 		int j;
 
 		for (j = 0; j < sad_count; j++) {
 			struct cea_sad *sad = &sads[j];
 
 			if (sad->format == eld_reg_to_type[i][1]) {
-				value = MAX_CHANNELS(sad->channels) |
-					DESCRIPTOR_BYTE_2(sad->byte2) |
-					SUPPORTED_FREQUENCIES(sad->freq);
+				if (sad->channels > max_channels) {
+					value = MAX_CHANNELS(sad->channels) |
+						DESCRIPTOR_BYTE_2(sad->byte2) |
+						SUPPORTED_FREQUENCIES(sad->freq);
+					max_channels = sad->channels;
+				}
+
 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
-					value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
-				break;
+					stereo_freqs |= sad->freq;
+				else
+					break;
 			}
 		}
+
+		value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
 		WREG32(eld_reg_to_type[i][0], value);
 	}
 
@@ -324,8 +373,10 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
 	if (ASIC_IS_DCE6(rdev)) {
 		dce6_afmt_select_pin(encoder);
 		dce6_afmt_write_sad_regs(encoder);
+		dce6_afmt_write_latency_fields(encoder, mode);
 	} else {
 		evergreen_hdmi_write_sad_regs(encoder);
+		dce4_afmt_write_latency_fields(encoder, mode);
 	}
 
 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 4f6d2962767d..17f990798992 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -750,6 +750,44 @@
  *   bit6 = 192 kHz
  */
 
+#define AZ_CHANNEL_COUNT_CONTROL                          0x5fe4
+#       define HBR_CHANNEL_COUNT(x)                       (((x) & 0x7) << 0)
+#       define COMPRESSED_CHANNEL_COUNT(x)                (((x) & 0x7) << 4)
+/* HBR_CHANNEL_COUNT, COMPRESSED_CHANNEL_COUNT
+ * 0   = use stream header
+ * 1-7 = channel count - 1
+ */
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC         0x5fe8
+#       define VIDEO_LIPSYNC(x)                           (((x) & 0xff) << 0)
+#       define AUDIO_LIPSYNC(x)                           (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0   = invalid
+ * x   = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_HBR             0x5fec
+#       define HBR_CAPABLE                                (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION0 0x5ff4
+#       define DISPLAY0_TYPE(x)                           (((x) & 0x3) << 0)
+#       define DISPLAY_TYPE_NONE                          0
+#       define DISPLAY_TYPE_HDMI                          1
+#       define DISPLAY_TYPE_DP                            2
+#       define DISPLAY0_ID(x)                             (((x) & 0x3f) << 2)
+#       define DISPLAY1_TYPE(x)                           (((x) & 0x3) << 8)
+#       define DISPLAY1_ID(x)                             (((x) & 0x3f) << 10)
+#       define DISPLAY2_TYPE(x)                           (((x) & 0x3) << 16)
+#       define DISPLAY2_ID(x)                             (((x) & 0x3f) << 18)
+#       define DISPLAY3_TYPE(x)                           (((x) & 0x3) << 24)
+#       define DISPLAY3_ID(x)                             (((x) & 0x3f) << 26)
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION1 0x5ff8
+#       define DISPLAY4_TYPE(x)                           (((x) & 0x3) << 0)
+#       define DISPLAY4_ID(x)                             (((x) & 0x3f) << 2)
+#       define DISPLAY5_TYPE(x)                           (((x) & 0x3) << 8)
+#       define DISPLAY5_ID(x)                             (((x) & 0x3f) << 10)
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_NUMBER       0x5ffc
+#       define NUMBER_OF_DISPLAY_ID(x)                    (((x) & 0x7) << 0)
+
 #define AZ_HOT_PLUG_CONTROL                               0x5e78
 #       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
 #       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
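Per the field comments above, each LIPSYNC byte is either 0 (invalid), a legal delay value, or 255 (sync not supported), which is exactly how the new dce4/dce6 latency writers use it. A small hedged helper (illustrative, not from the patch) packing the response dword from the connector's parsed ELD latencies:

        /* hypothetical: build the PIN0_CONTROL_RESPONSE_LIPSYNC payload */
        static u32 pack_lipsync(bool present, u8 video_latency, u8 audio_latency)
        {
                if (!present)
                        return VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
                return VIDEO_LIPSYNC(video_latency) | AUDIO_LIPSYNC(audio_latency);
        }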
@@ -1312,6 +1350,38 @@
 #       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
 #       define DC_HPDx_EN                                 (1 << 28)
 
+/* DCE4/5/6 FMT blocks */
+#define FMT_DYNAMIC_EXP_CNTL                 0x6fb4
+#       define FMT_DYNAMIC_EXP_EN            (1 << 0)
+#       define FMT_DYNAMIC_EXP_MODE          (1 << 4)
+        /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
+#define FMT_CONTROL                          0x6fb8
+#       define FMT_PIXEL_ENCODING            (1 << 16)
+        /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL                0x6fc8
+#       define FMT_TRUNCATE_EN               (1 << 0)
+#       define FMT_TRUNCATE_DEPTH            (1 << 4)
+#       define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#       define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#       define FMT_SPATIAL_DITHER_DEPTH      (1 << 12)
+#       define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#       define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#       define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#       define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#       define FMT_TEMPORAL_DITHER_DEPTH     (1 << 20)
+#       define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#       define FMT_TEMPORAL_LEVEL            (1 << 24)
+#       define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#       define FMT_25FRC_SEL(x)              ((x) << 26)
+#       define FMT_50FRC_SEL(x)              ((x) << 28)
+#       define FMT_75FRC_SEL(x)              ((x) << 30)
+#define FMT_CLAMP_CONTROL                    0x6fe4
+#       define FMT_CLAMP_DATA_EN             (1 << 0)
+#       define FMT_CLAMP_COLOR_FORMAT(x)     ((x) << 16)
+#       define FMT_CLAMP_6BPC                0
+#       define FMT_CLAMP_8BPC                1
+#       define FMT_CLAMP_10BPC               2
+
 /* ASYNC DMA */
 #define DMA_RB_RPTR                                       0xd008
 #define DMA_RB_WPTR                                       0xd00c
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cac2866d79da..11aab2ab54ce 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -174,11 +174,6 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
 extern void evergreen_program_aspm(struct radeon_device *rdev);
 extern void sumo_rlc_fini(struct radeon_device *rdev);
 extern int sumo_rlc_init(struct radeon_device *rdev);
-extern void cayman_dma_vm_set_page(struct radeon_device *rdev,
-				   struct radeon_ib *ib,
-				   uint64_t pe,
-				   uint64_t addr, unsigned count,
-				   uint32_t incr, uint32_t flags);
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -2400,77 +2395,6 @@ void cayman_vm_decode_fault(struct radeon_device *rdev,
 		       block, mc_id);
 }
 
-#define R600_ENTRY_VALID   (1 << 0)
-#define R600_PTE_SYSTEM    (1 << 1)
-#define R600_PTE_SNOOPED   (1 << 2)
-#define R600_PTE_READABLE  (1 << 5)
-#define R600_PTE_WRITEABLE (1 << 6)
-
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
-{
-	uint32_t r600_flags = 0;
-	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
-	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
-	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
-	if (flags & RADEON_VM_PAGE_SYSTEM) {
-		r600_flags |= R600_PTE_SYSTEM;
-		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
-	}
-	return r600_flags;
-}
-
-/**
- * cayman_vm_set_page - update the page tables using the CP
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using the CP (cayman/TN).
- */
-void cayman_vm_set_page(struct radeon_device *rdev,
-			struct radeon_ib *ib,
-			uint64_t pe,
-			uint64_t addr, unsigned count,
-			uint32_t incr, uint32_t flags)
-{
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-	uint64_t value;
-	unsigned ndw;
-
-	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
-		while (count) {
-			ndw = 1 + count * 2;
-			if (ndw > 0x3FFF)
-				ndw = 0x3FFF;
-
-			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
-			ib->ptr[ib->length_dw++] = pe;
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			for (; ndw > 1; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
-				addr += incr;
-				value |= r600_flags;
-				ib->ptr[ib->length_dw++] = value;
-				ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			}
-		}
-	} else {
-		cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
-	}
-}
-
 /**
  * cayman_vm_flush - vm flush using the CP
  *
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index dd6e9688fbef..bdeb65ed3658 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -24,6 +24,7 @@
 #include <drm/drmP.h>
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_trace.h"
 #include "nid.h"
 
 u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
@@ -245,8 +246,7 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
- * @r600_flags: hw access flags
+ * @flags: hw access flags
  *
  * Update the page tables using the DMA (cayman/TN).
  */
@@ -256,11 +256,12 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
 			    uint64_t addr, unsigned count,
 			    uint32_t incr, uint32_t flags)
 {
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
 	uint64_t value;
 	unsigned ndw;
 
-	if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) {
+	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+	if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
 		while (count) {
 			ndw = count * 2;
 			if (ndw > 0xFFFFE)
@@ -271,16 +272,16 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
 			ib->ptr[ib->length_dw++] = pe;
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
 			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
+				if (flags & R600_PTE_SYSTEM) {
 					value = radeon_vm_map_gart(rdev, addr);
 					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
+				} else if (flags & R600_PTE_VALID) {
 					value = addr;
 				} else {
 					value = 0;
 				}
 				addr += incr;
-				value |= r600_flags;
+				value |= flags;
 				ib->ptr[ib->length_dw++] = value;
 				ib->ptr[ib->length_dw++] = upper_32_bits(value);
 			}
@@ -291,7 +292,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
 			if (ndw > 0xFFFFE)
 				ndw = 0xFFFFE;
 
-			if (flags & RADEON_VM_PAGE_VALID)
+			if (flags & R600_PTE_VALID)
 				value = addr;
 			else
 				value = 0;
@@ -299,7 +300,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
 			ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
 			ib->ptr[ib->length_dw++] = pe; /* dst addr */
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+			ib->ptr[ib->length_dw++] = flags; /* mask */
 			ib->ptr[ib->length_dw++] = 0;
 			ib->ptr[ib->length_dw++] = value; /* value */
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index d71333033b2b..784983d78158 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1434,7 +1434,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
-		return -EINVAL;
+		return -ENOENT;
 	}
 	crtc = obj_to_crtc(obj);
 	radeon_crtc = to_radeon_crtc(crtc);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f9be22062df1..4e609e8a8d2b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -124,6 +124,59 @@ int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 	return 0;
 }
 
+void dce3_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	/* not needed for analog */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= FMT_SPATIAL_DITHER_EN;
+		else
+			tmp |= FMT_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
+	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
 /* get temperature in millidegrees */
 int rv6xx_get_temp(struct radeon_device *rdev)
 {
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 01a3ec83f284..5dceea6f71ae 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -887,7 +887,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
-		return -EINVAL;
+		return -ENOENT;
 	}
 	crtc = obj_to_crtc(obj);
 	radeon_crtc = to_radeon_crtc(crtc);
@@ -2328,13 +2328,8 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 	unsigned i;
 
 	kfree(parser->relocs);
-	for (i = 0; i < parser->nchunks; i++) {
-		kfree(parser->chunks[i].kdata);
-		if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
-			kfree(parser->chunks[i].kpage[0]);
-			kfree(parser->chunks[i].kpage[1]);
-		}
-	}
+	for (i = 0; i < parser->nchunks; i++)
+		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
 	kfree(parser->chunks_array);
 }
@@ -2391,13 +2386,12 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
 	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
 	parser.ib.length_dw = ib_chunk->length_dw;
 	*l = parser.ib.length_dw;
-	r = r600_cs_parse(&parser);
-	if (r) {
-		DRM_ERROR("Invalid command stream !\n");
+	if (DRM_COPY_FROM_USER(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
+		r = -EFAULT;
 		r600_cs_parser_fini(&parser, r);
 		return r;
 	}
-	r = radeon_cs_finish_pages(&parser);
+	r = r600_cs_parse(&parser);
 	if (r) {
 		DRM_ERROR("Invalid command stream !\n");
 		r600_cs_parser_fini(&parser, r);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 06022e3b9c3b..4b89262f3f0e 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -24,6 +24,7 @@
  * Authors: Christian König
  */
 #include <linux/hdmi.h>
+#include <linux/gcd.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include "radeon.h"
@@ -57,35 +58,57 @@ enum r600_hdmi_iec_status_bits {
 static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
     /*       32kHz        44.1kHz       48kHz    */
     /* Clock      N     CTS      N     CTS      N     CTS */
-    {  25175,  4576,  28125,  7007,  31250,  6864,  28125 }, /*  25,20/1.001 MHz */
+    {  25175,  4096,  25175, 28224, 125875,  6144,  25175 }, /*  25,20/1.001 MHz */
     {  25200,  4096,  25200,  6272,  28000,  6144,  25200 }, /*  25.20       MHz */
     {  27000,  4096,  27000,  6272,  30000,  6144,  27000 }, /*  27.00       MHz */
     {  27027,  4096,  27027,  6272,  30030,  6144,  27027 }, /*  27.00*1.001 MHz */
     {  54000,  4096,  54000,  6272,  60000,  6144,  54000 }, /*  54.00       MHz */
     {  54054,  4096,  54054,  6272,  60060,  6144,  54054 }, /*  54.00*1.001 MHz */
-    {  74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /*  74.25/1.001 MHz */
+    {  74176,  4096,  74176,  5733,  75335,  6144,  74176 }, /*  74.25/1.001 MHz */
     {  74250,  4096,  74250,  6272,  82500,  6144,  74250 }, /*  74.25       MHz */
-    { 148352, 11648, 421875,  8918, 234375,  5824, 140625 }, /* 148.50/1.001 MHz */
+    { 148352,  4096, 148352,  5733, 150670,  6144, 148352 }, /* 148.50/1.001 MHz */
     { 148500,  4096, 148500,  6272, 165000,  6144, 148500 }, /* 148.50       MHz */
-    {      0,  4096,      0,  6272,      0,  6144,      0 }  /* Other */
 };
 
+
 /*
- * calculate CTS value if it's not found in the table
+ * calculate CTS and N values if they are not found in the table
  */
-static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
+static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
 {
-	u64 n;
-	u32 d;
+	int n, cts;
+	unsigned long div, mul;
 
-	if (*CTS == 0) {
-		n = (u64)clock * (u64)N * 1000ULL;
-		d = 128 * freq;
-		do_div(n, d);
-		*CTS = n;
-	}
-	DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
-		  N, *CTS, freq);
+	/* Safe, but overly large values */
+	n = 128 * freq;
+	cts = clock * 1000;
+
+	/* Smallest valid fraction */
+	div = gcd(n, cts);
+
+	n /= div;
+	cts /= div;
+
+	/*
+	 * The optimal N is 128*freq/1000. Calculate the closest larger
+	 * value that doesn't truncate any bits.
+	 */
+	mul = ((128*freq/1000) + (n-1))/n;
+
+	n *= mul;
+	cts *= mul;
+
+	/* Check that we are in spec (not always possible) */
+	if (n < (128*freq/1500))
+		printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
+	if (n > (128*freq/300))
+		printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
+
+	*N = n;
+	*CTS = cts;
+
+	DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
+		  *N, *CTS, freq);
 }
 
 struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
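A worked check of the fallback math (not in the patch, but reproducible by hand): for the 74.25/1.001 MHz clock at 48 kHz, n = 128*48000 = 6144000 and cts = 74176*1000 = 74176000; gcd(n, cts) = 64000, reducing the fraction to 96/1159. The optimal N is 128*48000/1000 = 6144, so mul = (6144 + 95)/96 = 64, giving N = 6144 and CTS = 74176, the same values now hard-coded in the 74176 table row. As a standalone C demo:

        #include <stdio.h>

        static unsigned long gcd(unsigned long a, unsigned long b)
        {
                while (b) {
                        unsigned long t = a % b;
                        a = b;
                        b = t;
                }
                return a;
        }

        int main(void)
        {
                unsigned long clock = 74176, freq = 48000; /* kHz, Hz */
                unsigned long n = 128 * freq, cts = clock * 1000;
                unsigned long div = gcd(n, cts), mul;

                n /= div;
                cts /= div;
                mul = ((128 * freq / 1000) + (n - 1)) / n;
                printf("N=%lu CTS=%lu\n", n * mul, cts * mul); /* N=6144 CTS=74176 */
                return 0;
        }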
@@ -93,15 +116,16 @@ struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
 	struct radeon_hdmi_acr res;
 	u8 i;
 
-	for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
-	     r600_hdmi_predefined_acr[i].clock != 0; i++)
-		;
-	res = r600_hdmi_predefined_acr[i];
+	/* Precalculated values for common clocks */
+	for (i = 0; i < ARRAY_SIZE(r600_hdmi_predefined_acr); i++) {
+		if (r600_hdmi_predefined_acr[i].clock == clock)
+			return r600_hdmi_predefined_acr[i];
+	}
 
-	/* In case some CTS are missing */
-	r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
-	r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
-	r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
+	/* And odd clocks get manually calculated */
+	r600_hdmi_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
+	r600_hdmi_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
+	r600_hdmi_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
 
 	return res;
 }
@@ -313,8 +337,10 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 		return;
 
 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder)
+		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
 	}
 
 	if (!radeon_connector) {
@@ -366,8 +392,10 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
 	};
 
 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder)
+		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
 	}
 
 	if (!radeon_connector) {
@@ -384,20 +412,30 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
 
 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
 		u32 value = 0;
+		u8 stereo_freqs = 0;
+		int max_channels = -1;
 		int j;
 
 		for (j = 0; j < sad_count; j++) {
 			struct cea_sad *sad = &sads[j];
 
 			if (sad->format == eld_reg_to_type[i][1]) {
-				value = MAX_CHANNELS(sad->channels) |
-					DESCRIPTOR_BYTE_2(sad->byte2) |
-					SUPPORTED_FREQUENCIES(sad->freq);
+				if (sad->channels > max_channels) {
+					value = MAX_CHANNELS(sad->channels) |
+						DESCRIPTOR_BYTE_2(sad->byte2) |
+						SUPPORTED_FREQUENCIES(sad->freq);
+					max_channels = sad->channels;
+				}
+
 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
-					value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
-				break;
+					stereo_freqs |= sad->freq;
+				else
+					break;
 			}
 		}
+
+		value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
 		WREG32(eld_reg_to_type[i][0], value);
 	}
 
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 7b3c7b5932c5..ebe38724a976 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1199,6 +1199,34 @@
 # define AFMT_AZ_FORMAT_WTRIG_ACK      (1 << 29)
 # define AFMT_AZ_AUDIO_ENABLE_CHG_ACK  (1 << 30)
 
+/* DCE3 FMT blocks */
+#define FMT_CONTROL                          0x6700
+#       define FMT_PIXEL_ENCODING            (1 << 16)
+        /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL                0x6710
+#       define FMT_TRUNCATE_EN               (1 << 0)
+#       define FMT_TRUNCATE_DEPTH            (1 << 4)
+#       define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#       define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#       define FMT_SPATIAL_DITHER_DEPTH      (1 << 12)
+#       define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#       define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#       define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#       define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#       define FMT_TEMPORAL_DITHER_DEPTH     (1 << 20)
+#       define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#       define FMT_TEMPORAL_LEVEL            (1 << 24)
+#       define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#       define FMT_25FRC_SEL(x)              ((x) << 26)
+#       define FMT_50FRC_SEL(x)              ((x) << 28)
+#       define FMT_75FRC_SEL(x)              ((x) << 30)
+#define FMT_CLAMP_CONTROL                    0x672c
+#       define FMT_CLAMP_DATA_EN             (1 << 0)
+#       define FMT_CLAMP_COLOR_FORMAT(x)     ((x) << 16)
+#       define FMT_CLAMP_6BPC                0
+#       define FMT_CLAMP_8BPC                1
+#       define FMT_CLAMP_10BPC               2
+
 /* Power management */
 #define CG_SPLL_FUNC_CNTL                               0x600
 #   define SPLL_RESET                           (1 << 0)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 24f4960f59ee..b9ee99258602 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -98,6 +98,7 @@ extern int radeon_lockup_timeout;
 extern int radeon_fastfb;
 extern int radeon_dpm;
 extern int radeon_aspm;
+extern int radeon_runtime_pm;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -327,7 +328,6 @@ struct radeon_fence_driver {
 	/* sync_seq is protected by ring emission lock */
 	uint64_t			sync_seq[RADEON_NUM_RINGS];
 	atomic64_t			last_seq;
-	unsigned long			last_activity;
 	bool				initialized;
 };
 
@@ -832,6 +832,12 @@ struct radeon_mec {
 #define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
 #define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
 
+#define R600_PTE_VALID		(1 << 0)
+#define R600_PTE_SYSTEM		(1 << 1)
+#define R600_PTE_SNOOPED	(1 << 2)
+#define R600_PTE_READABLE	(1 << 5)
+#define R600_PTE_WRITEABLE	(1 << 6)
+
 struct radeon_vm {
 	struct list_head		list;
 	struct list_head		va;
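With these hardware PTE bits exposed in radeon.h, callers compose them directly instead of passing the RADEON_VM_PAGE_* soft flags through a per-ASIC translation (the cayman_vm_page_flags() helper removed from ni.c above did exactly that). A hedged example of a typical composition for a snooped system (GART) page:

        /* illustrative flag set for a CPU-cached GART page */
        static inline uint32_t example_gart_pte_flags(void)
        {
                return R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED |
                       R600_PTE_READABLE | R600_PTE_WRITEABLE;
        }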
@@ -967,12 +973,8 @@ struct radeon_cs_reloc {
 struct radeon_cs_chunk {
 	uint32_t		chunk_id;
 	uint32_t		length_dw;
-	int			kpage_idx[2];
-	uint32_t		*kpage[2];
 	uint32_t		*kdata;
 	void __user		*user_ptr;
-	int			last_copied_page;
-	int			last_page_index;
 };
 
 struct radeon_cs_parser {
@@ -1007,8 +1009,15 @@ struct radeon_cs_parser {
 	struct ww_acquire_ctx	ticket;
 };
 
-extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
-extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
+static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+
+	if (ibc->kdata)
+		return ibc->kdata[idx];
+	return p->ib.ptr[idx];
+}
+
 
 struct radeon_cs_packet {
 	unsigned	idx;
@@ -1675,8 +1684,6 @@ struct radeon_asic {
 	struct {
 		int (*init)(struct radeon_device *rdev);
 		void (*fini)(struct radeon_device *rdev);
-
-		u32 pt_ring_index;
 		void (*set_page)(struct radeon_device *rdev,
 				 struct radeon_ib *ib,
 				 uint64_t pe,
@@ -2170,6 +2177,7 @@ struct radeon_device {
 	bool				need_dma32;
 	bool				accel_working;
 	bool				fastfb_working; /* IGP feature*/
+	bool				needs_reset;
 	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
 	const struct firmware *me_fw;	/* all family ME firmware */
 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
@@ -2212,6 +2220,9 @@ struct radeon_device {
 	/* clock, powergating flags */
 	u32 cg_flags;
 	u32 pg_flags;
+
+	struct dev_pm_domain vga_pm_domain;
+	bool have_disp_power_ref;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -2673,8 +2684,8 @@ extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
 extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
 extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern int radeon_resume_kms(struct drm_device *dev);
-extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+extern int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 extern void radeon_program_register_sequence(struct radeon_device *rdev,
 					     const u32 *registers,
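The suspend/resume entry points now take two booleans, letting callers separate "power down the hardware" from "switch the fbcon state", which runtime PM needs since it must skip the console work a full S3 cycle does. A hedged sketch of a dev_pm_ops caller (function name illustrative, not from this patch):

        #include <linux/pci.h>

        static int example_pmops_suspend(struct device *dev)
        {
                struct pci_dev *pdev = to_pci_dev(dev);
                struct drm_device *drm_dev = pci_get_drvdata(pdev);

                /* full system sleep: suspend the hw and switch fbcon */
                return radeon_suspend_kms(drm_dev, true, true);
        }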
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 8f7e04538fd6..50853c0cb49d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1622,8 +1622,7 @@ static struct radeon_asic cayman_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &cayman_vm_set_page,
+		.set_page = &cayman_dma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1723,8 +1722,7 @@ static struct radeon_asic trinity_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &cayman_vm_set_page,
+		.set_page = &cayman_dma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1854,8 +1852,7 @@ static struct radeon_asic si_asic = {
 	.vm = {
 		.init = &si_vm_init,
 		.fini = &si_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &si_vm_set_page,
+		.set_page = &si_dma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -1879,7 +1876,7 @@ static struct radeon_asic si_asic = {
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = NULL,
+		.blit = &r600_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &si_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2000,8 +1997,7 @@ static struct radeon_asic ci_asic = {
 	.vm = {
 		.init = &cik_vm_init,
 		.fini = &cik_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &cik_vm_set_page,
+		.set_page = &cik_sdma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2100,8 +2096,7 @@ static struct radeon_asic kv_asic = {
 	.vm = {
 		.init = &cik_vm_init,
 		.fini = &cik_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &cik_vm_set_page,
+		.set_page = &cik_sdma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2442,27 +2437,48 @@ int radeon_asic_init(struct radeon_device *rdev)
 		}
 		break;
 	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
 		rdev->asic = &ci_asic;
 		rdev->num_crtc = 6;
 		rdev->has_uvd = true;
-		rdev->cg_flags =
-			RADEON_CG_SUPPORT_GFX_MGCG |
-			RADEON_CG_SUPPORT_GFX_MGLS |
-			/*RADEON_CG_SUPPORT_GFX_CGCG |*/
-			RADEON_CG_SUPPORT_GFX_CGLS |
-			RADEON_CG_SUPPORT_GFX_CGTS |
-			RADEON_CG_SUPPORT_GFX_CGTS_LS |
-			RADEON_CG_SUPPORT_GFX_CP_LS |
-			RADEON_CG_SUPPORT_MC_LS |
-			RADEON_CG_SUPPORT_MC_MGCG |
-			RADEON_CG_SUPPORT_SDMA_MGCG |
-			RADEON_CG_SUPPORT_SDMA_LS |
-			RADEON_CG_SUPPORT_BIF_LS |
-			RADEON_CG_SUPPORT_VCE_MGCG |
-			RADEON_CG_SUPPORT_UVD_MGCG |
-			RADEON_CG_SUPPORT_HDP_LS |
-			RADEON_CG_SUPPORT_HDP_MGCG;
-		rdev->pg_flags = 0;
+		if (rdev->family == CHIP_BONAIRE) {
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CGTS_LS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_MC_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_SDMA_LS |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+		} else {
+			rdev->cg_flags =
+				RADEON_CG_SUPPORT_GFX_MGCG |
+				RADEON_CG_SUPPORT_GFX_MGLS |
+				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGLS |
+				RADEON_CG_SUPPORT_GFX_CGTS |
+				RADEON_CG_SUPPORT_GFX_CP_LS |
+				RADEON_CG_SUPPORT_MC_LS |
+				RADEON_CG_SUPPORT_MC_MGCG |
+				RADEON_CG_SUPPORT_SDMA_MGCG |
+				RADEON_CG_SUPPORT_SDMA_LS |
+				RADEON_CG_SUPPORT_BIF_LS |
+				RADEON_CG_SUPPORT_VCE_MGCG |
+				RADEON_CG_SUPPORT_UVD_MGCG |
+				RADEON_CG_SUPPORT_HDP_LS |
+				RADEON_CG_SUPPORT_HDP_MGCG;
+			rdev->pg_flags = 0;
+		}
 		break;
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 70c29d5e080d..f2833ee3a613 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -581,17 +581,18 @@ int cayman_vm_init(struct radeon_device *rdev);
 void cayman_vm_fini(struct radeon_device *rdev);
 void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev,
-			struct radeon_ib *ib,
-			uint64_t pe,
-			uint64_t addr, unsigned count,
-			uint32_t incr, uint32_t flags);
 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
 				struct radeon_ib *ib);
 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_set_page(struct radeon_device *rdev,
+			    struct radeon_ib *ib,
+			    uint64_t pe,
+			    uint64_t addr, unsigned count,
+			    uint32_t incr, uint32_t flags);
+
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
 int ni_dpm_init(struct radeon_device *rdev);
@@ -653,17 +654,17 @@ int si_irq_set(struct radeon_device *rdev);
 int si_irq_process(struct radeon_device *rdev);
 int si_vm_init(struct radeon_device *rdev);
 void si_vm_fini(struct radeon_device *rdev);
-void si_vm_set_page(struct radeon_device *rdev,
-		    struct radeon_ib *ib,
-		    uint64_t pe,
-		    uint64_t addr, unsigned count,
-		    uint32_t incr, uint32_t flags);
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 int si_copy_dma(struct radeon_device *rdev,
 		uint64_t src_offset, uint64_t dst_offset,
 		unsigned num_gpu_pages,
 		struct radeon_fence **fence);
+void si_dma_vm_set_page(struct radeon_device *rdev,
+			struct radeon_ib *ib,
+			uint64_t pe,
+			uint64_t addr, unsigned count,
+			uint32_t incr, uint32_t flags);
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 u32 si_get_xclk(struct radeon_device *rdev);
 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -705,6 +706,10 @@ int cik_copy_dma(struct radeon_device *rdev,
 		 uint64_t src_offset, uint64_t dst_offset,
 		 unsigned num_gpu_pages,
 		 struct radeon_fence **fence);
+int cik_copy_cpdma(struct radeon_device *rdev,
+		   uint64_t src_offset, uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence);
 int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
 int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
@@ -731,11 +736,11 @@ int cik_irq_process(struct radeon_device *rdev);
 int cik_vm_init(struct radeon_device *rdev);
 void cik_vm_fini(struct radeon_device *rdev);
 void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_vm_set_page(struct radeon_device *rdev,
-		     struct radeon_ib *ib,
-		     uint64_t pe,
-		     uint64_t addr, unsigned count,
-		     uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_page(struct radeon_device *rdev,
+			  struct radeon_ib *ib,
+			  uint64_t pe,
+			  uint64_t addr, unsigned count,
+			  uint32_t incr, uint32_t flags);
 void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
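
The hunks above split the old per-ASIC vm_set_page() hooks (si_vm_set_page, cik_vm_set_page) into ring-specific variants such as si_dma_vm_set_page() and cik_sdma_vm_set_page(), so the CP and DMA rings each encode their own page-table update packets. A minimal sketch of how per-ring hooks like these can be dispatched; the vm_pte_funcs table below is a simplified assumption for illustration, not the driver's actual radeon_asic structure:

    /* hypothetical, simplified per-ring dispatch table */
    struct vm_pte_funcs {
            void (*set_page)(struct radeon_device *rdev, struct radeon_ib *ib,
                             uint64_t pe, uint64_t addr, unsigned count,
                             uint32_t incr, uint32_t flags);
    };

    static const struct vm_pte_funcs si_dma_pte = {
            .set_page = si_dma_vm_set_page, /* DMA ring builds the PTE writes */
    };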
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index d96070bf8388..6153ec18943a 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -59,6 +59,10 @@ struct atpx_mux {
 	u16 mux;
 } __packed;
 
+bool radeon_is_px(void) {
+	return radeon_atpx_priv.atpx_detected;
+}
+
 /**
  * radeon_atpx_call - call an ATPX method
  *
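
radeon_is_px() reports whether the ATPX ACPI methods used on PowerXpress (hybrid-graphics) laptops were detected; later hunks branch on it when deciding whether to enable runtime power management. A sketch of that decision folded into one helper; radeon_wants_runtime_pm() is hypothetical, while radeon_runtime_pm is the module parameter added further down in this patch:

    static bool radeon_wants_runtime_pm(void)
    {
            if (radeon_runtime_pm == 1)
                    return true;            /* forced on */
            if (radeon_runtime_pm == -1)
                    return radeon_is_px();  /* auto: PX systems only */
            return false;                   /* 0: disabled */
    }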
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 061b227dae0c..c155d6f3fa68 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -499,7 +499,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
 	fp2_gen_cntl = 0;
 
-	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
 	}
 
@@ -536,7 +536,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 				    (RADEON_CRTC_SYNC_TRISTAT |
 				     RADEON_CRTC_DISPLAY_DIS)));
 
-	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
 	}
 
@@ -554,7 +554,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
 	}
 	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
-	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
 	}
 	return r;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 64565732cb98..20a768ac89a8 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -31,6 +31,8 @@
 #include "radeon.h"
 #include "atom.h"
 
+#include <linux/pm_runtime.h>
+
 extern void
 radeon_combios_connected_scratch_regs(struct drm_connector *connector,
				       struct drm_encoder *encoder,
@@ -411,6 +413,21 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
 		}
 	}
 
+	if (property == rdev->mode_info.dither_property) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_connector->dither != val) {
+			radeon_connector->dither = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
 	if (property == rdev->mode_info.underscan_property) {
 		/* need to find digital encoder on connector */
 		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -626,6 +643,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
 	enum drm_connector_status ret = connector_status_disconnected;
+	int r;
+
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
 
 	if (encoder) {
 		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -651,6 +673,8 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
 	/* check acpi lid status ??? */
 
 	radeon_connector_update_scratch_regs(connector, ret);
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
 	return ret;
 }
 
@@ -750,6 +774,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	bool dret = false;
 	enum drm_connector_status ret = connector_status_disconnected;
+	int r;
+
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
 
 	encoder = radeon_best_single_encoder(connector);
 	if (!encoder)
@@ -790,9 +819,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
 	 * detected a monitor via load.
 	 */
 	if (radeon_connector->detected_by_load)
-		return connector->status;
-	else
-		return ret;
+		ret = connector->status;
+	goto out;
 	}
 
 	if (radeon_connector->dac_load_detect && encoder) {
@@ -817,6 +845,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
 	}
 
 	radeon_connector_update_scratch_regs(connector, ret);
+
+out:
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
+
 	return ret;
 }
 
@@ -873,10 +906,15 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	enum drm_connector_status ret = connector_status_disconnected;
+	int r;
 
 	if (!radeon_connector->dac_load_detect)
 		return ret;
 
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
+
 	encoder = radeon_best_single_encoder(connector);
 	if (!encoder)
 		ret = connector_status_disconnected;
@@ -887,6 +925,8 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
 	if (ret == connector_status_connected)
 		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
 	radeon_connector_update_scratch_regs(connector, ret);
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
 	return ret;
 }
 
@@ -954,12 +994,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
 	struct drm_encoder *encoder = NULL;
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	struct drm_mode_object *obj;
-	int i;
+	int i, r;
 	enum drm_connector_status ret = connector_status_disconnected;
 	bool dret = false, broken_edid = false;
 
-	if (!force && radeon_check_hpd_status_unchanged(connector))
-		return connector->status;
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
+
+	if (!force && radeon_check_hpd_status_unchanged(connector)) {
+		ret = connector->status;
+		goto exit;
+	}
 
 	if (radeon_connector->ddc_bus)
 		dret = radeon_ddc_probe(radeon_connector, false);
@@ -1110,6 +1156,11 @@ out:
 
 	/* updated in get modes as well since we need to know if it's analog or digital */
 	radeon_connector_update_scratch_regs(connector, ret);
+
+exit:
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
+
 	return ret;
 }
 
@@ -1377,9 +1428,16 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 	enum drm_connector_status ret = connector_status_disconnected;
 	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
 	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+	int r;
 
-	if (!force && radeon_check_hpd_status_unchanged(connector))
-		return connector->status;
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
+
+	if (!force && radeon_check_hpd_status_unchanged(connector)) {
+		ret = connector->status;
+		goto out;
+	}
 
 	if (radeon_connector->edid) {
 		kfree(radeon_connector->edid);
@@ -1443,6 +1501,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 	}
 
 	radeon_connector_update_scratch_regs(connector, ret);
+out:
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
+
 	return ret;
 }
 
@@ -1658,12 +1720,16 @@ radeon_add_atom_connector(struct drm_device *dev,
 		drm_object_attach_property(&radeon_connector->base.base,
					   rdev->mode_info.underscan_vborder_property,
					   0);
+
+		drm_object_attach_property(&radeon_connector->base.base,
+					   rdev->mode_info.dither_property,
+					   RADEON_FMT_DITHER_DISABLE);
+
 		if (radeon_audio != 0)
 			drm_object_attach_property(&radeon_connector->base.base,
						   rdev->mode_info.audio_property,
-						   (radeon_audio == 1) ?
-						   RADEON_AUDIO_AUTO :
-						   RADEON_AUDIO_DISABLE);
+						   RADEON_AUDIO_AUTO);
+
 		subpixel_order = SubPixelHorizontalRGB;
 		connector->interlace_allowed = true;
 		if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1760,9 +1826,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 		if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 			drm_object_attach_property(&radeon_connector->base.base,
						   rdev->mode_info.audio_property,
-						   (radeon_audio == 1) ?
-						   RADEON_AUDIO_AUTO :
-						   RADEON_AUDIO_DISABLE);
+						   RADEON_AUDIO_AUTO);
+		}
+		if (ASIC_IS_AVIVO(rdev)) {
+			drm_object_attach_property(&radeon_connector->base.base,
+						   rdev->mode_info.dither_property,
+						   RADEON_FMT_DITHER_DISABLE);
 		}
 		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
 			radeon_connector->dac_load_detect = true;
@@ -1807,9 +1876,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 		if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 			drm_object_attach_property(&radeon_connector->base.base,
						   rdev->mode_info.audio_property,
-						   (radeon_audio == 1) ?
-						   RADEON_AUDIO_AUTO :
-						   RADEON_AUDIO_DISABLE);
+						   RADEON_AUDIO_AUTO);
+		}
+		if (ASIC_IS_AVIVO(rdev)) {
+			drm_object_attach_property(&radeon_connector->base.base,
+						   rdev->mode_info.dither_property,
+						   RADEON_FMT_DITHER_DISABLE);
 		}
 		subpixel_order = SubPixelHorizontalRGB;
 		connector->interlace_allowed = true;
@@ -1853,9 +1925,13 @@ radeon_add_atom_connector(struct drm_device *dev,
 		if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 			drm_object_attach_property(&radeon_connector->base.base,
						   rdev->mode_info.audio_property,
-						   (radeon_audio == 1) ?
-						   RADEON_AUDIO_AUTO :
-						   RADEON_AUDIO_DISABLE);
+						   RADEON_AUDIO_AUTO);
+		}
+		if (ASIC_IS_AVIVO(rdev)) {
+			drm_object_attach_property(&radeon_connector->base.base,
+						   rdev->mode_info.dither_property,
+						   RADEON_FMT_DITHER_DISABLE);
+
 		}
 		connector->interlace_allowed = true;
 		/* in theory with a DP to VGA converter... */
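
Every detect() callback changed above now takes a runtime-PM reference before touching the hardware and drops it on each exit path, so a powered-down GPU is woken for the probe and re-armed for autosuspend afterwards. A minimal sketch of that bracket with the probe body elided; my_connector_detect() is illustrative, not a function in this patch:

    static enum drm_connector_status
    my_connector_detect(struct drm_connector *connector, bool force)
    {
            enum drm_connector_status ret = connector_status_disconnected;
            int r;

            r = pm_runtime_get_sync(connector->dev->dev); /* wake the GPU */
            if (r < 0)
                    return connector_status_disconnected;

            /* ... DDC probe / load detection would run here ... */

            pm_runtime_mark_last_busy(connector->dev->dev); /* restart idle timer */
            pm_runtime_put_autosuspend(connector->dev->dev);
            return ret;
    }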
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 80285e35bc65..26ca223d12d6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -212,9 +212,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 			return -EFAULT;
 		}
 		p->chunks[i].length_dw = user_chunk.length_dw;
-		p->chunks[i].kdata = NULL;
 		p->chunks[i].chunk_id = user_chunk.chunk_id;
-		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
 		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
 			p->chunk_relocs_idx = i;
 		}
@@ -237,25 +235,31 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 			return -EINVAL;
 		}
 
-		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
-		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
-		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
-			size = p->chunks[i].length_dw * sizeof(uint32_t);
-			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
-			if (p->chunks[i].kdata == NULL) {
-				return -ENOMEM;
-			}
-			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
-					       p->chunks[i].user_ptr, size)) {
-				return -EFAULT;
-			}
-			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
-				p->cs_flags = p->chunks[i].kdata[0];
-				if (p->chunks[i].length_dw > 1)
-					ring = p->chunks[i].kdata[1];
-				if (p->chunks[i].length_dw > 2)
-					priority = (s32)p->chunks[i].kdata[2];
-			}
+		size = p->chunks[i].length_dw;
+		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+		p->chunks[i].user_ptr = cdata;
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
+			continue;
+
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
+				continue;
+		}
+
+		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+		size *= sizeof(uint32_t);
+		if (p->chunks[i].kdata == NULL) {
+			return -ENOMEM;
+		}
+		if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
+			return -EFAULT;
+		}
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+			p->cs_flags = p->chunks[i].kdata[0];
+			if (p->chunks[i].length_dw > 1)
+				ring = p->chunks[i].kdata[1];
+			if (p->chunks[i].length_dw > 2)
+				priority = (s32)p->chunks[i].kdata[2];
 		}
 	}
 
278 } 282 }
279 } 283 }
280 284
281 /* deal with non-vm */
282 if ((p->chunk_ib_idx != -1) &&
283 ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
284 (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
285 if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
286 DRM_ERROR("cs IB too big: %d\n",
287 p->chunks[p->chunk_ib_idx].length_dw);
288 return -EINVAL;
289 }
290 if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
291 p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
292 p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
293 if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
294 p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
295 kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
296 kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
297 p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
298 p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
299 return -ENOMEM;
300 }
301 }
302 p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
303 p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
304 p->chunks[p->chunk_ib_idx].last_copied_page = -1;
305 p->chunks[p->chunk_ib_idx].last_page_index =
306 ((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
307 }
308
309 return 0; 285 return 0;
310} 286}
311 287
@@ -339,13 +315,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
 	kfree(parser->track);
 	kfree(parser->relocs);
 	kfree(parser->relocs_ptr);
-	for (i = 0; i < parser->nchunks; i++) {
-		kfree(parser->chunks[i].kdata);
-		if ((parser->rdev->flags & RADEON_IS_AGP)) {
-			kfree(parser->chunks[i].kpage[0]);
-			kfree(parser->chunks[i].kpage[1]);
-		}
-	}
+	for (i = 0; i < parser->nchunks; i++)
+		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
 	kfree(parser->chunks_array);
 	radeon_ib_free(parser->rdev, &parser->ib);
@@ -355,7 +326,6 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
 static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	int r;
 
 	if (parser->chunk_ib_idx == -1)
@@ -364,28 +334,11 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 	if (parser->cs_flags & RADEON_CS_USE_VM)
 		return 0;
 
-	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
-	/* Copy the packet into the IB, the parser will read from the
-	 * input memory (cached) and write to the IB (which can be
-	 * uncached).
-	 */
-	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
-			  NULL, ib_chunk->length_dw * 4);
-	if (r) {
-		DRM_ERROR("Failed to get ib !\n");
-		return r;
-	}
-	parser->ib.length_dw = ib_chunk->length_dw;
 	r = radeon_cs_parse(rdev, parser->ring, parser);
 	if (r || parser->parser_error) {
 		DRM_ERROR("Invalid command stream !\n");
 		return r;
 	}
-	r = radeon_cs_finish_pages(parser);
-	if (r) {
-		DRM_ERROR("Invalid command stream !\n");
-		return r;
-	}
 
 	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
@@ -423,7 +376,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
 static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
 	struct radeon_vm *vm = &fpriv->vm;
 	int r;
@@ -433,49 +385,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
 		return 0;
 
-	if ((rdev->family >= CHIP_TAHITI) &&
-	    (parser->chunk_const_ib_idx != -1)) {
-		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
-		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
-			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
-			return -EINVAL;
-		}
-		r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
-				  vm, ib_chunk->length_dw * 4);
-		if (r) {
-			DRM_ERROR("Failed to get const ib !\n");
-			return r;
-		}
-		parser->const_ib.is_const_ib = true;
-		parser->const_ib.length_dw = ib_chunk->length_dw;
-		/* Copy the packet into the IB */
-		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
-				       ib_chunk->length_dw * 4)) {
-			return -EFAULT;
-		}
+	if (parser->const_ib.length_dw) {
 		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
 		if (r) {
 			return r;
 		}
 	}
 
-	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
-	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
-		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
-		return -EINVAL;
-	}
-	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
-			  vm, ib_chunk->length_dw * 4);
-	if (r) {
-		DRM_ERROR("Failed to get ib !\n");
-		return r;
-	}
-	parser->ib.length_dw = ib_chunk->length_dw;
-	/* Copy the packet into the IB */
-	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
-			       ib_chunk->length_dw * 4)) {
-		return -EFAULT;
-	}
 	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
 	if (r) {
 		return r;
@@ -527,6 +443,62 @@ static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
 	return r;
 }
 
+static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
+{
+	struct radeon_cs_chunk *ib_chunk;
+	struct radeon_vm *vm = NULL;
+	int r;
+
+	if (parser->chunk_ib_idx == -1)
+		return 0;
+
+	if (parser->cs_flags & RADEON_CS_USE_VM) {
+		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+		vm = &fpriv->vm;
+
+		if ((rdev->family >= CHIP_TAHITI) &&
+		    (parser->chunk_const_ib_idx != -1)) {
+			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
+				return -EINVAL;
+			}
+			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
+					  vm, ib_chunk->length_dw * 4);
+			if (r) {
+				DRM_ERROR("Failed to get const ib !\n");
+				return r;
+			}
+			parser->const_ib.is_const_ib = true;
+			parser->const_ib.length_dw = ib_chunk->length_dw;
+			if (DRM_COPY_FROM_USER(parser->const_ib.ptr,
+					       ib_chunk->user_ptr,
+					       ib_chunk->length_dw * 4))
+				return -EFAULT;
+		}
+
+		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+			return -EINVAL;
+		}
+	}
+	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+
+	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+			  vm, ib_chunk->length_dw * 4);
+	if (r) {
+		DRM_ERROR("Failed to get ib !\n");
+		return r;
+	}
+	parser->ib.length_dw = ib_chunk->length_dw;
+	if (ib_chunk->kdata)
+		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
+	else if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
+		return -EFAULT;
+	return 0;
+}
+
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct radeon_device *rdev = dev->dev_private;
@@ -552,10 +524,15 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		r = radeon_cs_handle_lockup(rdev, r);
 		return r;
 	}
-	r = radeon_cs_parser_relocs(&parser);
-	if (r) {
-		if (r != -ERESTARTSYS)
+
+	r = radeon_cs_ib_fill(rdev, &parser);
+	if (!r) {
+		r = radeon_cs_parser_relocs(&parser);
+		if (r && r != -ERESTARTSYS)
 			DRM_ERROR("Failed to parse relocation %d!\n", r);
+	}
+
+	if (r) {
 		radeon_cs_parser_fini(&parser, r, false);
 		up_read(&rdev->exclusive_lock);
 		r = radeon_cs_handle_lockup(rdev, r);
@@ -579,97 +556,6 @@ out:
 	return r;
 }
 
-int radeon_cs_finish_pages(struct radeon_cs_parser *p)
-{
-	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
-	int i;
-	int size = PAGE_SIZE;
-
-	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
-		if (i == ibc->last_page_index) {
-			size = (ibc->length_dw * 4) % PAGE_SIZE;
-			if (size == 0)
-				size = PAGE_SIZE;
-		}
-
-		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
-				       ibc->user_ptr + (i * PAGE_SIZE),
-				       size))
-			return -EFAULT;
-	}
-	return 0;
-}
-
-static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
-{
-	int new_page;
-	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
-	int i;
-	int size = PAGE_SIZE;
-	bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
-		false : true;
-
-	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
-		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
-				       ibc->user_ptr + (i * PAGE_SIZE),
-				       PAGE_SIZE)) {
-			p->parser_error = -EFAULT;
-			return 0;
-		}
-	}
-
-	if (pg_idx == ibc->last_page_index) {
-		size = (ibc->length_dw * 4) % PAGE_SIZE;
-		if (size == 0)
-			size = PAGE_SIZE;
-	}
-
-	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
-	if (copy1)
-		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
-
-	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
-			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
-			       size)) {
-		p->parser_error = -EFAULT;
-		return 0;
-	}
-
-	/* copy to IB for non single case */
-	if (!copy1)
-		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
-
-	ibc->last_copied_page = pg_idx;
-	ibc->kpage_idx[new_page] = pg_idx;
-
-	return new_page;
-}
-
-u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
-	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
-	u32 pg_idx, pg_offset;
-	u32 idx_value = 0;
-	int new_page;
-
-	pg_idx = (idx * 4) / PAGE_SIZE;
-	pg_offset = (idx * 4) % PAGE_SIZE;
-
-	if (ibc->kpage_idx[0] == pg_idx)
-		return ibc->kpage[0][pg_offset/4];
-	if (ibc->kpage_idx[1] == pg_idx)
-		return ibc->kpage[1][pg_offset/4];
-
-	new_page = radeon_cs_update_pages(p, pg_idx);
-	if (new_page < 0) {
-		p->parser_error = new_page;
-		return 0;
-	}
-
-	idx_value = ibc->kpage[new_page][pg_offset/4];
-	return idx_value;
-}
-
 /**
  * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
  * @parser: parser structure holding parsing context.
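
The reworked parser copies each chunk it needs exactly once up front (drm_malloc_ab handles the potentially large allocation), and radeon_cs_ib_fill() prefers that kernel copy over a second copy_from_user, which is why the two-page sliding window (kpage[0]/kpage[1]) and its helpers above could be deleted outright. With kdata fully populated, fetching an IB dword reduces to an array index. A sketch of the resulting accessor, assuming the post-patch chunk layout; the real inline helper lives in radeon.h, outside this diff:

    static inline u32 get_ib_value(struct radeon_cs_parser *p, int idx)
    {
            struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];

            /* kdata is set when the chunk was buffered in the kernel;
             * otherwise the data was copied straight into the IB mapping. */
            return ibc->kdata ? ibc->kdata[idx] : p->ib.ptr[idx];
    }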
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 841d0e09be3e..b9234c43f43d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -98,9 +98,16 @@ static const char radeon_family_name[][16] = {
98 "BONAIRE", 98 "BONAIRE",
99 "KAVERI", 99 "KAVERI",
100 "KABINI", 100 "KABINI",
101 "HAWAII",
101 "LAST", 102 "LAST",
102}; 103};
103 104
105#if defined(CONFIG_VGA_SWITCHEROO)
106bool radeon_is_px(void);
107#else
108static inline bool radeon_is_px(void) { return false; }
109#endif
110
104/** 111/**
105 * radeon_program_register_sequence - program an array of registers. 112 * radeon_program_register_sequence - program an array of registers.
106 * 113 *
@@ -1076,7 +1083,10 @@ static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
 static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
-	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+	if (radeon_is_px() && state == VGA_SWITCHEROO_OFF)
+		return;
+
 	if (state == VGA_SWITCHEROO_ON) {
 		unsigned d3_delay = dev->pdev->d3_delay;
 
@@ -1087,7 +1097,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
 		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
 			dev->pdev->d3_delay = 20;
 
-		radeon_resume_kms(dev);
+		radeon_resume_kms(dev, true, true);
 
 		dev->pdev->d3_delay = d3_delay;
 
@@ -1097,7 +1107,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
 		printk(KERN_INFO "radeon: switched off\n");
 		drm_kms_helper_poll_disable(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		radeon_suspend_kms(dev, pmm);
+		radeon_suspend_kms(dev, true, true);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
@@ -1147,6 +1157,7 @@ int radeon_device_init(struct radeon_device *rdev,
 {
 	int r, i;
 	int dma_bits;
+	bool runtime = false;
 
 	rdev->shutdown = false;
 	rdev->dev = &pdev->dev;
@@ -1293,7 +1304,14 @@ int radeon_device_init(struct radeon_device *rdev,
 	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
-	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false);
+
+	if (radeon_runtime_pm == 1)
+		runtime = true;
+	if ((radeon_runtime_pm == -1) && radeon_is_px())
+		runtime = true;
+	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
+	if (runtime)
+		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
 
 	r = radeon_init(rdev);
 	if (r)
@@ -1383,7 +1401,7 @@ void radeon_device_fini(struct radeon_device *rdev)
  * Returns 0 for success or an error on failure.
  * Called at driver suspend.
  */
-int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
+int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 {
 	struct radeon_device *rdev;
 	struct drm_crtc *crtc;
@@ -1394,9 +1412,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 	if (dev == NULL || dev->dev_private == NULL) {
 		return -ENODEV;
 	}
-	if (state.event == PM_EVENT_PRETHAW) {
-		return 0;
-	}
+
 	rdev = dev->dev_private;
 
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1455,14 +1471,17 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 	radeon_agp_suspend(rdev);
 
 	pci_save_state(dev->pdev);
-	if (state.event == PM_EVENT_SUSPEND) {
+	if (suspend) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
-	console_lock();
-	radeon_fbdev_set_suspend(rdev, 1);
-	console_unlock();
+
+	if (fbcon) {
+		console_lock();
+		radeon_fbdev_set_suspend(rdev, 1);
+		console_unlock();
+	}
 	return 0;
 }
 
@@ -1475,7 +1494,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
  * Returns 0 for success or an error on failure.
  * Called at driver resume.
  */
-int radeon_resume_kms(struct drm_device *dev)
+int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 {
 	struct drm_connector *connector;
 	struct radeon_device *rdev = dev->dev_private;
@@ -1484,12 +1503,17 @@ int radeon_resume_kms(struct drm_device *dev)
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	console_lock();
-	pci_set_power_state(dev->pdev, PCI_D0);
-	pci_restore_state(dev->pdev);
-	if (pci_enable_device(dev->pdev)) {
-		console_unlock();
-		return -1;
+	if (fbcon) {
+		console_lock();
+	}
+	if (resume) {
+		pci_set_power_state(dev->pdev, PCI_D0);
+		pci_restore_state(dev->pdev);
+		if (pci_enable_device(dev->pdev)) {
+			if (fbcon)
+				console_unlock();
+			return -1;
+		}
 	}
 	/* resume AGP if in use */
 	radeon_agp_resume(rdev);
@@ -1502,9 +1526,11 @@ int radeon_resume_kms(struct drm_device *dev)
 	radeon_pm_resume(rdev);
 	radeon_restore_bios_scratch_regs(rdev);
 
-	radeon_fbdev_set_suspend(rdev, 0);
-	console_unlock();
-
+	if (fbcon) {
+		radeon_fbdev_set_suspend(rdev, 0);
+		console_unlock();
+	}
+
 	/* init dig PHYs, disp eng pll */
 	if (rdev->is_atom_bios) {
 		radeon_atom_encoder_init(rdev);
@@ -1549,6 +1575,14 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 	int resched;
 
 	down_write(&rdev->exclusive_lock);
+
+	if (!rdev->needs_reset) {
+		up_write(&rdev->exclusive_lock);
+		return 0;
+	}
+
+	rdev->needs_reset = false;
+
 	radeon_save_bios_scratch_regs(rdev);
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
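
radeon_suspend_kms()/radeon_resume_kms() now take explicit booleans in place of pm_message_t: the first controls the PCI power transition, fbcon controls suspending the fbdev console under console_lock(). One pair of functions can then serve system sleep, hibernation, and runtime PM. The combinations used by the callers in this patch, as a sketch:

    radeon_suspend_kms(dev, true,  true);   /* system suspend: PCI D3hot + fbcon */
    radeon_suspend_kms(dev, false, true);   /* hibernation freeze: skip PCI off  */
    radeon_suspend_kms(dev, false, false);  /* runtime suspend: no console work  */
    radeon_resume_kms(dev, true, true);     /* full resume from system sleep     */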
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0d1aa050d41d..7b253815a323 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -30,6 +30,7 @@
 #include "atom.h"
 #include <asm/div64.h>
 
+#include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
@@ -306,7 +307,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
 	 */
 	if (update_pending &&
	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
-							       &vpos, &hpos)) &&
+							       &vpos, &hpos, NULL, NULL)) &&
	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
	     (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
 		/* crtc didn't flip in this target vblank interval,
@@ -494,11 +495,55 @@ unlock_free:
 	return r;
 }
 
+static int
+radeon_crtc_set_config(struct drm_mode_set *set)
+{
+	struct drm_device *dev;
+	struct radeon_device *rdev;
+	struct drm_crtc *crtc;
+	bool active = false;
+	int ret;
+
+	if (!set || !set->crtc)
+		return -EINVAL;
+
+	dev = set->crtc->dev;
+
+	ret = pm_runtime_get_sync(dev->dev);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_crtc_helper_set_config(set);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		if (crtc->enabled)
+			active = true;
+
+	pm_runtime_mark_last_busy(dev->dev);
+
+	rdev = dev->dev_private;
+	/* if we have active crtcs and we don't have a power ref,
+	   take the current one */
+	if (active && !rdev->have_disp_power_ref) {
+		rdev->have_disp_power_ref = true;
+		return ret;
+	}
+	/* if we have no active crtcs, then drop the power ref
+	   we got before */
+	if (!active && rdev->have_disp_power_ref) {
+		pm_runtime_put_autosuspend(dev->dev);
+		rdev->have_disp_power_ref = false;
+	}
+
+	/* drop the power reference we got coming in here */
+	pm_runtime_put_autosuspend(dev->dev);
+	return ret;
+}
 static const struct drm_crtc_funcs radeon_crtc_funcs = {
 	.cursor_set = radeon_crtc_cursor_set,
 	.cursor_move = radeon_crtc_cursor_move,
 	.gamma_set = radeon_crtc_gamma_set,
-	.set_config = drm_crtc_helper_set_config,
+	.set_config = radeon_crtc_set_config,
 	.destroy = radeon_crtc_destroy,
 	.page_flip = radeon_crtc_page_flip,
 };
@@ -1178,6 +1223,12 @@ static struct drm_prop_enum_list radeon_audio_enum_list[] =
 	{ RADEON_AUDIO_AUTO, "auto" },
 };
 
+/* XXX support different dither options? spatial, temporal, both, etc. */
+static struct drm_prop_enum_list radeon_dither_enum_list[] =
+{	{ RADEON_FMT_DITHER_DISABLE, "off" },
+	{ RADEON_FMT_DITHER_ENABLE, "on" },
+};
+
 static int radeon_modeset_create_props(struct radeon_device *rdev)
 {
 	int sz;
@@ -1234,6 +1285,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
1234 "audio", 1285 "audio",
1235 radeon_audio_enum_list, sz); 1286 radeon_audio_enum_list, sz);
1236 1287
1288 sz = ARRAY_SIZE(radeon_dither_enum_list);
1289 rdev->mode_info.dither_property =
1290 drm_property_create_enum(rdev->ddev, 0,
1291 "dither",
1292 radeon_dither_enum_list, sz);
1293
1237 return 0; 1294 return 0;
1238} 1295}
1239 1296
@@ -1539,12 +1596,17 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 }
 
 /*
- * Retrieve current video scanout position of crtc on a given gpu.
+ * Retrieve current video scanout position of crtc on a given gpu, and
+ * an optional accurate timestamp of when query happened.
  *
  * \param dev Device to query.
  * \param crtc Crtc to query.
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
+ * \param *stime Target location for timestamp taken immediately before
+ *               scanout position query. Can be NULL to skip timestamp.
+ * \param *etime Target location for timestamp taken immediately after
+ *               scanout position query. Can be NULL to skip timestamp.
  *
  * Returns vpos as a positive number while in active scanout area.
  * Returns vpos as a negative number inside vblank, counting the number
@@ -1560,7 +1622,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  * unknown small number of scanlines wrt. real scanout position.
  *
  */
-int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos,
+			       ktime_t *stime, ktime_t *etime)
 {
 	u32 stat_crtc = 0, vbl = 0, position = 0;
 	int vbl_start, vbl_end, vtotal, ret = 0;
@@ -1568,6 +1631,12 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int
 
 	struct radeon_device *rdev = dev->dev_private;
 
+	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+	/* Get optional system timestamp before query. */
+	if (stime)
+		*stime = ktime_get();
+
 	if (ASIC_IS_DCE4(rdev)) {
 		if (crtc == 0) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
@@ -1650,6 +1719,12 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int
 		}
 	}
 
+	/* Get optional system timestamp after query. */
+	if (etime)
+		*etime = ktime_get();
+
+	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
 	/* Decode into vertical and horizontal scanout position. */
 	*vpos = position & 0x1fff;
 	*hpos = (position >> 16) & 0x1fff;
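
The new stime/etime parameters bracket the scanout-position register reads with ktime_get() samples, letting callers bound how stale the returned position is when deriving vblank timestamps; passing NULL skips either sample. A sketch of a caller measuring that uncertainty (variable names are illustrative):

    ktime_t stime, etime;
    s64 max_error_ns;
    int vpos, hpos;

    radeon_get_crtc_scanoutpos(dev, crtc_id, &vpos, &hpos, &stime, &etime);
    /* the position was sampled somewhere inside [stime, etime] */
    max_error_ns = ktime_to_ns(ktime_sub(etime, stime));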
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9c14a1ba1de4..1aee32213f66 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -36,8 +36,9 @@
 #include <drm/drm_pciids.h>
 #include <linux/console.h>
 #include <linux/module.h>
-
-
+#include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
+#include "drm_crtc_helper.h"
 /*
  * KMS wrapper.
  * - 2.0.0 - initial interface
@@ -87,8 +88,8 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
 				 struct drm_file *file_priv);
 void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv);
-int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
-int radeon_resume_kms(struct drm_device *dev);
+int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
+int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
 u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
 int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
 void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
@@ -100,14 +101,14 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
 irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
-int radeon_gem_object_init(struct drm_gem_object *obj);
 void radeon_gem_object_free(struct drm_gem_object *obj);
 int radeon_gem_object_open(struct drm_gem_object *obj,
				struct drm_file *file_priv);
 void radeon_gem_object_close(struct drm_gem_object *obj,
				struct drm_file *file_priv);
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
-				      int *vpos, int *hpos);
+				      int *vpos, int *hpos, ktime_t *stime,
+				      ktime_t *etime);
 extern const struct drm_ioctl_desc radeon_ioctls_kms[];
 extern int radeon_max_kms_ioctl;
 int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -137,9 +138,11 @@ void radeon_debugfs_cleanup(struct drm_minor *minor);
 #if defined(CONFIG_VGA_SWITCHEROO)
 void radeon_register_atpx_handler(void);
 void radeon_unregister_atpx_handler(void);
+bool radeon_is_px(void);
 #else
 static inline void radeon_register_atpx_handler(void) {}
 static inline void radeon_unregister_atpx_handler(void) {}
+static inline bool radeon_is_px(void) { return false; }
 #endif
 
 int radeon_no_wb;
@@ -162,6 +165,7 @@ int radeon_lockup_timeout = 10000;
 int radeon_fastfb = 0;
 int radeon_dpm = -1;
 int radeon_aspm = -1;
+int radeon_runtime_pm = -1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -223,6 +227,9 @@ module_param_named(dpm, radeon_dpm, int, 0444);
 MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(aspm, radeon_aspm, int, 0444);
 
+MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
+module_param_named(runpm, radeon_runtime_pm, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
 	radeon_PCI_IDS
 };
@@ -259,6 +266,7 @@ static int radeon_resume(struct drm_device *dev)
 	return 0;
 }
 
+
 static const struct file_operations radeon_driver_old_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
@@ -353,25 +361,144 @@ radeon_pci_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
-static int
-radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int radeon_pmops_suspend(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
-	return radeon_suspend_kms(dev, state);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_suspend_kms(drm_dev, true, true);
 }
 
-static int
-radeon_pci_resume(struct pci_dev *pdev)
+static int radeon_pmops_resume(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
-	return radeon_resume_kms(dev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_resume_kms(drm_dev, true, true);
+}
+
+static int radeon_pmops_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_suspend_kms(drm_dev, false, true);
+}
+
+static int radeon_pmops_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_resume_kms(drm_dev, false, true);
+}
+
+static int radeon_pmops_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int ret;
+
+	if (radeon_runtime_pm == 0)
+		return -EINVAL;
+
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+	drm_kms_helper_poll_disable(drm_dev);
+	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
+
+	ret = radeon_suspend_kms(drm_dev, false, false);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3cold);
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+
+	return 0;
+}
+
+static int radeon_pmops_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int ret;
+
+	if (radeon_runtime_pm == 0)
+		return -EINVAL;
+
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+	pci_set_master(pdev);
+
+	ret = radeon_resume_kms(drm_dev, false, false);
+	drm_kms_helper_poll_enable(drm_dev);
+	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+	return 0;
 }
 
+static int radeon_pmops_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct drm_crtc *crtc;
+
+	if (radeon_runtime_pm == 0)
+		return -EBUSY;
+
+	/* are we PX enabled? */
+	if (radeon_runtime_pm == -1 && !radeon_is_px()) {
+		DRM_DEBUG_DRIVER("failing to power off - not px\n");
+		return -EBUSY;
+	}
+
+	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
+		if (crtc->enabled) {
+			DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+			return -EBUSY;
+		}
+	}
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_autosuspend(dev);
+	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+	return 1;
+}
+
+long radeon_drm_ioctl(struct file *filp,
+		      unsigned int cmd, unsigned long arg)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev;
+	long ret;
+	dev = file_priv->minor->dev;
+	ret = pm_runtime_get_sync(dev->dev);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_ioctl(filp, cmd, arg);
+
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+	return ret;
+}
+
+static const struct dev_pm_ops radeon_pm_ops = {
+	.suspend = radeon_pmops_suspend,
+	.resume = radeon_pmops_resume,
+	.freeze = radeon_pmops_freeze,
+	.thaw = radeon_pmops_thaw,
+	.poweroff = radeon_pmops_freeze,
+	.restore = radeon_pmops_resume,
+	.runtime_suspend = radeon_pmops_runtime_suspend,
+	.runtime_resume = radeon_pmops_runtime_resume,
+	.runtime_idle = radeon_pmops_runtime_idle,
+};
+
 static const struct file_operations radeon_driver_kms_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
 	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
+	.unlocked_ioctl = radeon_drm_ioctl,
 	.mmap = radeon_mmap,
 	.poll = drm_poll,
 	.read = drm_read,
@@ -380,6 +507,15 @@ static const struct file_operations radeon_driver_kms_fops = {
 #endif
 };
 
+
+static void
+radeon_pci_shutdown(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	radeon_driver_unload_kms(dev);
+}
+
 static struct drm_driver kms_driver = {
	.driver_features =
		DRIVER_USE_AGP |
@@ -392,8 +528,6 @@ static struct drm_driver kms_driver = {
 	.postclose = radeon_driver_postclose_kms,
	.lastclose = radeon_driver_lastclose_kms,
	.unload = radeon_driver_unload_kms,
-	.suspend = radeon_suspend_kms,
-	.resume = radeon_resume_kms,
	.get_vblank_counter = radeon_get_vblank_counter_kms,
	.enable_vblank = radeon_enable_vblank_kms,
	.disable_vblank = radeon_disable_vblank_kms,
@@ -408,7 +542,6 @@ static struct drm_driver kms_driver = {
408 .irq_uninstall = radeon_driver_irq_uninstall_kms, 542 .irq_uninstall = radeon_driver_irq_uninstall_kms,
409 .irq_handler = radeon_driver_irq_handler_kms, 543 .irq_handler = radeon_driver_irq_handler_kms,
410 .ioctls = radeon_ioctls_kms, 544 .ioctls = radeon_ioctls_kms,
411 .gem_init_object = radeon_gem_object_init,
412 .gem_free_object = radeon_gem_object_free, 545 .gem_free_object = radeon_gem_object_free,
413 .gem_open_object = radeon_gem_object_open, 546 .gem_open_object = radeon_gem_object_open,
414 .gem_close_object = radeon_gem_object_close, 547 .gem_close_object = radeon_gem_object_close,
@@ -451,8 +584,8 @@ static struct pci_driver radeon_kms_pci_driver = {
451 .id_table = pciidlist, 584 .id_table = pciidlist,
452 .probe = radeon_pci_probe, 585 .probe = radeon_pci_probe,
453 .remove = radeon_pci_remove, 586 .remove = radeon_pci_remove,
454 .suspend = radeon_pci_suspend, 587 .driver.pm = &radeon_pm_ops,
455 .resume = radeon_pci_resume, 588 .shutdown = radeon_pci_shutdown,
456}; 589};
457 590
458static int __init radeon_init(void) 591static int __init radeon_init(void)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index b369d42f7de5..543dcfae7e6f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -113,6 +113,9 @@
113#define DRIVER_MINOR 33 113#define DRIVER_MINOR 33
114#define DRIVER_PATCHLEVEL 0 114#define DRIVER_PATCHLEVEL 0
115 115
116long radeon_drm_ioctl(struct file *filp,
117 unsigned int cmd, unsigned long arg);
118
116/* The rest of the file is DEPRECATED! */ 119/* The rest of the file is DEPRECATED! */
117#ifdef CONFIG_DRM_RADEON_UMS 120#ifdef CONFIG_DRM_RADEON_UMS
118 121
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 3c8289083f9d..614ad549297f 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -96,6 +96,7 @@ enum radeon_family {
96 CHIP_BONAIRE, 96 CHIP_BONAIRE,
97 CHIP_KAVERI, 97 CHIP_KAVERI,
98 CHIP_KABINI, 98 CHIP_KABINI,
99 CHIP_HAWAII,
99 CHIP_LAST, 100 CHIP_LAST,
100}; 101};
101 102
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index ddb8f8e04eb5..281d14c22a47 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -190,10 +190,8 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
190 } 190 }
191 } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq); 191 } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
192 192
193 if (wake) { 193 if (wake)
194 rdev->fence_drv[ring].last_activity = jiffies;
195 wake_up_all(&rdev->fence_queue); 194 wake_up_all(&rdev->fence_queue);
196 }
197} 195}
198 196
199/** 197/**
@@ -212,13 +210,13 @@ static void radeon_fence_destroy(struct kref *kref)
212} 210}
213 211
214/** 212/**
215 * radeon_fence_seq_signaled - check if a fence sequeuce number has signaled 213 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
216 * 214 *
217 * @rdev: radeon device pointer 215 * @rdev: radeon device pointer
218 * @seq: sequence number 216 * @seq: sequence number
219 * @ring: ring index the fence is associated with 217 * @ring: ring index the fence is associated with
220 * 218 *
 221 * Check if the last singled fence sequnce number is >= the requested 219 * Check if the last signaled fence sequence number is >= the requested
222 * sequence number (all asics). 220 * sequence number (all asics).
223 * Returns true if the fence has signaled (current fence value 221 * Returns true if the fence has signaled (current fence value
224 * is >= requested value) or false if it has not (current fence 222 * is >= requested value) or false if it has not (current fence
@@ -263,113 +261,131 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
263} 261}
264 262
265/** 263/**
266 * radeon_fence_wait_seq - wait for a specific sequence number 264 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
267 * 265 *
268 * @rdev: radeon device pointer 266 * @rdev: radeon device pointer
269 * @target_seq: sequence number we want to wait for 267 * @seq: sequence numbers
270 * @ring: ring index the fence is associated with 268 *
 269 * Check if the last signaled fence sequence number is >= the requested
270 * sequence number (all asics).
271 * Returns true if any has signaled (current value is >= requested value)
272 * or false if it has not. Helper function for radeon_fence_wait_seq.
273 */
274static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
275{
276 unsigned i;
277
278 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
279 if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
280 return true;
281 }
282 return false;
283}
284
285/**
286 * radeon_fence_wait_seq - wait for a specific sequence numbers
287 *
288 * @rdev: radeon device pointer
289 * @target_seq: sequence number(s) we want to wait for
271 * @intr: use interruptable sleep 290 * @intr: use interruptable sleep
272 * @lock_ring: whether the ring should be locked or not 291 * @lock_ring: whether the ring should be locked or not
273 * 292 *
274 * Wait for the requested sequence number to be written (all asics). 293 * Wait for the requested sequence number(s) to be written by any ring
 294 * (all asics). Sequence number array is indexed by ring id.
275 * @intr selects whether to use interruptable (true) or non-interruptable 295 * @intr selects whether to use interruptable (true) or non-interruptable
276 * (false) sleep when waiting for the sequence number. Helper function 296 * (false) sleep when waiting for the sequence number. Helper function
277 * for radeon_fence_wait(), et al. 297 * for radeon_fence_wait_*().
278 * Returns 0 if the sequence number has passed, error for all other cases. 298 * Returns 0 if the sequence number has passed, error for all other cases.
279 * -EDEADLK is returned when a GPU lockup has been detected and the ring is 299 * -EDEADLK is returned when a GPU lockup has been detected.
280 * marked as not ready so no further jobs get scheduled until a successful
281 * reset.
282 */ 300 */
283static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq, 301static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
284 unsigned ring, bool intr, bool lock_ring) 302 bool intr, bool lock_ring)
285{ 303{
286 unsigned long timeout, last_activity; 304 uint64_t last_seq[RADEON_NUM_RINGS];
287 uint64_t seq;
288 unsigned i;
289 bool signaled; 305 bool signaled;
290 int r; 306 int i, r;
307
308 while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
309
310 /* Save current sequence values, used to check for GPU lockups */
311 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
312 if (!target_seq[i])
313 continue;
291 314
292 while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) { 315 last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
293 if (!rdev->ring[ring].ready) { 316 trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]);
294 return -EBUSY; 317 radeon_irq_kms_sw_irq_get(rdev, i);
295 } 318 }
296 319
297 timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT; 320 if (intr) {
298 if (time_after(rdev->fence_drv[ring].last_activity, timeout)) { 321 r = wait_event_interruptible_timeout(rdev->fence_queue, (
299 /* the normal case, timeout is somewhere before last_activity */ 322 (signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
300 timeout = rdev->fence_drv[ring].last_activity - timeout; 323 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
301 } else { 324 } else {
302 /* either jiffies wrapped around, or no fence was signaled in the last 500ms 325 r = wait_event_timeout(rdev->fence_queue, (
303 * anyway we will just wait for the minimum amount and then check for a lockup 326 (signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
304 */ 327 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
305 timeout = 1;
306 } 328 }
307 seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
308 /* Save current last activity valuee, used to check for GPU lockups */
309 last_activity = rdev->fence_drv[ring].last_activity;
310 329
311 trace_radeon_fence_wait_begin(rdev->ddev, seq); 330 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
312 radeon_irq_kms_sw_irq_get(rdev, ring); 331 if (!target_seq[i])
313 if (intr) { 332 continue;
314 r = wait_event_interruptible_timeout(rdev->fence_queue, 333
315 (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)), 334 radeon_irq_kms_sw_irq_put(rdev, i);
316 timeout); 335 trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]);
317 } else {
318 r = wait_event_timeout(rdev->fence_queue,
319 (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
320 timeout);
321 } 336 }
322 radeon_irq_kms_sw_irq_put(rdev, ring); 337
323 if (unlikely(r < 0)) { 338 if (unlikely(r < 0))
324 return r; 339 return r;
325 }
326 trace_radeon_fence_wait_end(rdev->ddev, seq);
327 340
328 if (unlikely(!signaled)) { 341 if (unlikely(!signaled)) {
342 if (rdev->needs_reset)
343 return -EDEADLK;
344
329 /* we were interrupted for some reason and fence 345 /* we were interrupted for some reason and fence
330 * isn't signaled yet, resume waiting */ 346 * isn't signaled yet, resume waiting */
331 if (r) { 347 if (r)
332 continue; 348 continue;
349
350 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
351 if (!target_seq[i])
352 continue;
353
354 if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
355 break;
333 } 356 }
334 357
335 /* check if sequence value has changed since last_activity */ 358 if (i != RADEON_NUM_RINGS)
336 if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
337 continue; 359 continue;
338 }
339 360
340 if (lock_ring) { 361 if (lock_ring)
341 mutex_lock(&rdev->ring_lock); 362 mutex_lock(&rdev->ring_lock);
342 }
343 363
344 /* test if somebody else has already decided that this is a lockup */ 364 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
345 if (last_activity != rdev->fence_drv[ring].last_activity) { 365 if (!target_seq[i])
346 if (lock_ring) { 366 continue;
347 mutex_unlock(&rdev->ring_lock); 367
348 } 368 if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
349 continue; 369 break;
350 } 370 }
351 371
352 if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) { 372 if (i < RADEON_NUM_RINGS) {
353 /* good news we believe it's a lockup */ 373 /* good news we believe it's a lockup */
354 dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n", 374 dev_warn(rdev->dev, "GPU lockup (waiting for "
355 target_seq, seq); 375 "0x%016llx last fence id 0x%016llx on"
356 376 " ring %d)\n",
357 /* change last activity so nobody else think there is a lockup */ 377 target_seq[i], last_seq[i], i);
358 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 378
359 rdev->fence_drv[i].last_activity = jiffies; 379 /* remember that we need an reset */
360 } 380 rdev->needs_reset = true;
361 381 if (lock_ring)
362 /* mark the ring as not ready any more */
363 rdev->ring[ring].ready = false;
364 if (lock_ring) {
365 mutex_unlock(&rdev->ring_lock); 382 mutex_unlock(&rdev->ring_lock);
366 } 383 wake_up_all(&rdev->fence_queue);
367 return -EDEADLK; 384 return -EDEADLK;
368 } 385 }
369 386
370 if (lock_ring) { 387 if (lock_ring)
371 mutex_unlock(&rdev->ring_lock); 388 mutex_unlock(&rdev->ring_lock);
372 }
373 } 389 }
374 } 390 }
375 return 0; 391 return 0;
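
The rewritten wait loop above sleeps on the shared fence queue in RADEON_FENCE_JIFFIES_TIMEOUT slices, snapshots each ring's last_seq before sleeping, and only escalates to the lockup path when a full slice passes with no ring making progress. A minimal sketch of the two checks that drive that loop, with fences reduced to plain counters:

/* Sketch of the progress-or-lockup check; fences are just counters here. */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define NUM_RINGS 4

static uint64_t last_seq[NUM_RINGS];          /* last value signaled by the hw */

static bool any_signaled(const uint64_t *target)
{
    for (int i = 0; i < NUM_RINGS; ++i)
        if (target[i] && last_seq[i] >= target[i])
            return true;
    return false;
}

/* One loop iteration after a timeout expired without a wakeup. */
static bool made_progress(const uint64_t *target, const uint64_t *snapshot)
{
    for (int i = 0; i < NUM_RINGS; ++i)
        if (target[i] && snapshot[i] != last_seq[i])
            return true;                      /* some ring advanced: keep waiting */
    return false;                             /* no progress: suspect a GPU lockup */
}

int main(void)
{
    uint64_t target[NUM_RINGS] = { 10, 0, 0, 0 };
    uint64_t snap[NUM_RINGS]   = { 5, 0, 0, 0 };
    last_seq[0] = 7;                          /* ring 0 advanced from 5 to 7 */
    printf("signaled=%d progress=%d\n",
           any_signaled(target), made_progress(target, snap));
    return 0;
}
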
@@ -388,6 +404,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
388 */ 404 */
389int radeon_fence_wait(struct radeon_fence *fence, bool intr) 405int radeon_fence_wait(struct radeon_fence *fence, bool intr)
390{ 406{
407 uint64_t seq[RADEON_NUM_RINGS] = {};
391 int r; 408 int r;
392 409
393 if (fence == NULL) { 410 if (fence == NULL) {
@@ -395,147 +412,15 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
395 return -EINVAL; 412 return -EINVAL;
396 } 413 }
397 414
398 r = radeon_fence_wait_seq(fence->rdev, fence->seq, 415 seq[fence->ring] = fence->seq;
399 fence->ring, intr, true); 416 if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
400 if (r) { 417 return 0;
401 return r;
402 }
403 fence->seq = RADEON_FENCE_SIGNALED_SEQ;
404 return 0;
405}
406
407static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
408{
409 unsigned i;
410
411 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
412 if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
413 return true;
414 }
415 }
416 return false;
417}
418 418
419/** 419 r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
420 * radeon_fence_wait_any_seq - wait for a sequence number on any ring 420 if (r)
421 * 421 return r;
422 * @rdev: radeon device pointer
423 * @target_seq: sequence number(s) we want to wait for
424 * @intr: use interruptable sleep
425 *
426 * Wait for the requested sequence number(s) to be written by any ring
427 * (all asics). Sequnce number array is indexed by ring id.
428 * @intr selects whether to use interruptable (true) or non-interruptable
429 * (false) sleep when waiting for the sequence number. Helper function
430 * for radeon_fence_wait_any(), et al.
431 * Returns 0 if the sequence number has passed, error for all other cases.
432 */
433static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
434 u64 *target_seq, bool intr)
435{
436 unsigned long timeout, last_activity, tmp;
437 unsigned i, ring = RADEON_NUM_RINGS;
438 bool signaled;
439 int r;
440
441 for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
442 if (!target_seq[i]) {
443 continue;
444 }
445
446 /* use the most recent one as indicator */
447 if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
448 last_activity = rdev->fence_drv[i].last_activity;
449 }
450
451 /* For lockup detection just pick the lowest ring we are
452 * actively waiting for
453 */
454 if (i < ring) {
455 ring = i;
456 }
457 }
458
459 /* nothing to wait for ? */
460 if (ring == RADEON_NUM_RINGS) {
461 return -ENOENT;
462 }
463
464 while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
465 timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
466 if (time_after(last_activity, timeout)) {
467 /* the normal case, timeout is somewhere before last_activity */
468 timeout = last_activity - timeout;
469 } else {
470 /* either jiffies wrapped around, or no fence was signaled in the last 500ms
471 * anyway we will just wait for the minimum amount and then check for a lockup
472 */
473 timeout = 1;
474 }
475
476 trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
477 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
478 if (target_seq[i]) {
479 radeon_irq_kms_sw_irq_get(rdev, i);
480 }
481 }
482 if (intr) {
483 r = wait_event_interruptible_timeout(rdev->fence_queue,
484 (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
485 timeout);
486 } else {
487 r = wait_event_timeout(rdev->fence_queue,
488 (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
489 timeout);
490 }
491 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
492 if (target_seq[i]) {
493 radeon_irq_kms_sw_irq_put(rdev, i);
494 }
495 }
496 if (unlikely(r < 0)) {
497 return r;
498 }
499 trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
500
501 if (unlikely(!signaled)) {
502 /* we were interrupted for some reason and fence
503 * isn't signaled yet, resume waiting */
504 if (r) {
505 continue;
506 }
507
508 mutex_lock(&rdev->ring_lock);
509 for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
510 if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
511 tmp = rdev->fence_drv[i].last_activity;
512 }
513 }
514 /* test if somebody else has already decided that this is a lockup */
515 if (last_activity != tmp) {
516 last_activity = tmp;
517 mutex_unlock(&rdev->ring_lock);
518 continue;
519 }
520
521 if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
522 /* good news we believe it's a lockup */
523 dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
524 target_seq[ring]);
525
526 /* change last activity so nobody else think there is a lockup */
527 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
528 rdev->fence_drv[i].last_activity = jiffies;
529 }
530 422
531 /* mark the ring as not ready any more */ 423 fence->seq = RADEON_FENCE_SIGNALED_SEQ;
532 rdev->ring[ring].ready = false;
533 mutex_unlock(&rdev->ring_lock);
534 return -EDEADLK;
535 }
536 mutex_unlock(&rdev->ring_lock);
537 }
538 }
539 return 0; 424 return 0;
540} 425}
541 426
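
With the any-ring helper in place, waiting on a single fence becomes a degenerate case: build a zeroed per-ring array, fill one slot, and short-circuit on the RADEON_FENCE_SIGNALED_SEQ sentinel. A sketch of that shape (the sentinel value here is a stand-in):

/* Sketch: a one-entry target array plus the signaled-sentinel fast path. */
#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS          4
#define SIGNALED_SENTINEL  UINT64_MAX   /* stand-in for RADEON_FENCE_SIGNALED_SEQ */

struct fence { unsigned ring; uint64_t seq; };

static int fence_wait(struct fence *f)
{
    uint64_t seq[NUM_RINGS] = {0};      /* unused rings stay 0 and are skipped */

    seq[f->ring] = f->seq;
    if (seq[f->ring] == SIGNALED_SENTINEL)
        return 0;                       /* already known signaled: no wait */

    printf("wait on ring %u for seq %llu\n",
           f->ring, (unsigned long long)seq[f->ring]);
    f->seq = SIGNALED_SENTINEL;         /* cache the result for later waiters */
    return 0;
}

int main(void)
{
    struct fence f = { .ring = 2, .seq = 42 };
    fence_wait(&f);
    return fence_wait(&f);              /* second call hits the fast path */
}
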
@@ -557,7 +442,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
557 bool intr) 442 bool intr)
558{ 443{
559 uint64_t seq[RADEON_NUM_RINGS]; 444 uint64_t seq[RADEON_NUM_RINGS];
560 unsigned i; 445 unsigned i, num_rings = 0;
561 int r; 446 int r;
562 447
563 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 448 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -567,15 +452,19 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
567 continue; 452 continue;
568 } 453 }
569 454
570 if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
571 /* something was allready signaled */
572 return 0;
573 }
574
575 seq[i] = fences[i]->seq; 455 seq[i] = fences[i]->seq;
456 ++num_rings;
457
 458 /* test if something was already signaled */
459 if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
460 return 0;
576 } 461 }
577 462
578 r = radeon_fence_wait_any_seq(rdev, seq, intr); 463 /* nothing to wait for ? */
464 if (num_rings == 0)
465 return -ENOENT;
466
467 r = radeon_fence_wait_seq(rdev, seq, intr, true);
579 if (r) { 468 if (r) {
580 return r; 469 return r;
581 } 470 }
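
radeon_fence_wait_any now counts the rings it actually waits on and returns -ENOENT itself when the array is empty, instead of leaving that case to the deleted radeon_fence_wait_any_seq. Sketched with plain integers:

/* Sketch: assembling the multi-ring target array for wait-any semantics. */
#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS          4
#define SIGNALED_SENTINEL  UINT64_MAX
#define ENOENT_ERR         (-2)

static int fence_wait_any(const uint64_t *fences /* 0 = no fence on that ring */)
{
    uint64_t seq[NUM_RINGS] = {0};
    unsigned num_rings = 0;

    for (unsigned i = 0; i < NUM_RINGS; ++i) {
        if (!fences[i])
            continue;
        seq[i] = fences[i];
        ++num_rings;
        if (seq[i] == SIGNALED_SENTINEL)
            return 0;                   /* something already signaled */
    }
    if (num_rings == 0)
        return ENOENT_ERR;              /* nothing to wait for */

    printf("waiting on %u ring(s)\n", num_rings);
    return 0;                           /* real code hands seq[] to the wait helper */
}

int main(void)
{
    uint64_t none[NUM_RINGS] = {0}, some[NUM_RINGS] = { 0, 9, 0, 0 };
    printf("%d %d\n", fence_wait_any(none), fence_wait_any(some));
    return 0;
}
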
@@ -594,15 +483,15 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
594 */ 483 */
595int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) 484int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
596{ 485{
597 uint64_t seq; 486 uint64_t seq[RADEON_NUM_RINGS] = {};
598 487
599 seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL; 488 seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
600 if (seq >= rdev->fence_drv[ring].sync_seq[ring]) { 489 if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
601 /* nothing to wait for, last_seq is 490 /* nothing to wait for, last_seq is
 602 already the last emitted fence */ 491 already the last emitted fence */
603 return -ENOENT; 492 return -ENOENT;
604 } 493 }
605 return radeon_fence_wait_seq(rdev, seq, ring, false, false); 494 return radeon_fence_wait_seq(rdev, seq, false, false);
606} 495}
607 496
608/** 497/**
@@ -617,14 +506,18 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
617 */ 506 */
618int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) 507int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
619{ 508{
620 uint64_t seq = rdev->fence_drv[ring].sync_seq[ring]; 509 uint64_t seq[RADEON_NUM_RINGS] = {};
621 int r; 510 int r;
622 511
623 r = radeon_fence_wait_seq(rdev, seq, ring, false, false); 512 seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
513 if (!seq[ring])
514 return 0;
515
516 r = radeon_fence_wait_seq(rdev, seq, false, false);
624 if (r) { 517 if (r) {
625 if (r == -EDEADLK) { 518 if (r == -EDEADLK)
626 return -EDEADLK; 519 return -EDEADLK;
627 } 520
628 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n", 521 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
629 ring, r); 522 ring, r);
630 } 523 }
@@ -826,7 +719,6 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
826 for (i = 0; i < RADEON_NUM_RINGS; ++i) 719 for (i = 0; i < RADEON_NUM_RINGS; ++i)
827 rdev->fence_drv[ring].sync_seq[i] = 0; 720 rdev->fence_drv[ring].sync_seq[i] = 0;
828 atomic64_set(&rdev->fence_drv[ring].last_seq, 0); 721 atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
829 rdev->fence_drv[ring].last_activity = jiffies;
830 rdev->fence_drv[ring].initialized = false; 722 rdev->fence_drv[ring].initialized = false;
831} 723}
832 724
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index b990b1a2bd50..8a83b89d4709 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -607,8 +607,8 @@ static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
607 */ 607 */
608int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm) 608int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
609{ 609{
610 unsigned pd_size, pts_size; 610 unsigned pd_size, pd_entries, pts_size;
611 u64 *pd_addr; 611 struct radeon_ib ib;
612 int r; 612 int r;
613 613
614 if (vm == NULL) { 614 if (vm == NULL) {
@@ -619,8 +619,10 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
619 return 0; 619 return 0;
620 } 620 }
621 621
622retry:
623 pd_size = radeon_vm_directory_size(rdev); 622 pd_size = radeon_vm_directory_size(rdev);
623 pd_entries = radeon_vm_num_pdes(rdev);
624
625retry:
624 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, 626 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
625 &vm->page_directory, pd_size, 627 &vm->page_directory, pd_size,
626 RADEON_VM_PTB_ALIGN_SIZE, false); 628 RADEON_VM_PTB_ALIGN_SIZE, false);
@@ -637,9 +639,31 @@ retry:
637 vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory); 639 vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
638 640
639 /* Initially clear the page directory */ 641 /* Initially clear the page directory */
640 pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory); 642 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
641 memset(pd_addr, 0, pd_size); 643 NULL, pd_entries * 2 + 64);
644 if (r) {
645 radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
646 return r;
647 }
648
649 ib.length_dw = 0;
650
651 radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
652 0, pd_entries, 0, 0);
653
654 radeon_ib_sync_to(&ib, vm->fence);
655 r = radeon_ib_schedule(rdev, &ib, NULL);
656 if (r) {
657 radeon_ib_free(rdev, &ib);
658 radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
659 return r;
660 }
661 radeon_fence_unref(&vm->fence);
662 vm->fence = radeon_fence_ref(ib.fence);
663 radeon_ib_free(rdev, &ib);
664 radeon_fence_unref(&vm->last_flush);
642 665
666 /* allocate page table array */
643 pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *); 667 pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
644 vm->page_tables = kzalloc(pts_size, GFP_KERNEL); 668 vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
645 669
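
Clearing the page directory moves from a CPU-side memset to an indirect buffer on the DMA ring, reserved at two dwords per directory entry plus 64 dwords of slack, with the IB's fence replacing the VM fence so later updates order after the clear. A rough sketch of that handoff, with the radeon IB API stubbed:

/* Sketch: sizing the clear IB and handing its fence back to the VM; stubs only. */
#include <stdio.h>

struct fence { int id; };
struct ib    { unsigned max_dw; struct fence fence; };

static int  ib_get(struct ib *ib, unsigned ndw) { ib->max_dw = ndw; ib->fence.id = 1; return 0; }
static void ib_schedule(struct ib *ib)          { printf("submit %u dw\n", ib->max_dw); }

static int vm_alloc_pt(unsigned pd_entries, struct fence *vm_fence)
{
    struct ib ib;

    /* 2 dw per PDE for the set-page packet, plus 64 dw of slack (per the hunk) */
    if (ib_get(&ib, pd_entries * 2 + 64))
        return -1;

    ib_schedule(&ib);        /* writes zeroes over the whole directory */
    *vm_fence = ib.fence;    /* later PT updates must wait on this clear */
    return 0;
}

int main(void)
{
    struct fence f;
    return vm_alloc_pt(512, &f);
}
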
@@ -914,6 +938,26 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
914} 938}
915 939
916/** 940/**
941 * radeon_vm_page_flags - translate page flags to what the hw uses
942 *
 943 * @flags: flags coming from userspace
944 *
945 * Translate the flags the userspace ABI uses to hw flags.
946 */
947static uint32_t radeon_vm_page_flags(uint32_t flags)
948{
949 uint32_t hw_flags = 0;
950 hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
951 hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
952 hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
953 if (flags & RADEON_VM_PAGE_SYSTEM) {
954 hw_flags |= R600_PTE_SYSTEM;
955 hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
956 }
957 return hw_flags;
958}
959
960/**
917 * radeon_vm_update_pdes - make sure that page directory is valid 961 * radeon_vm_update_pdes - make sure that page directory is valid
918 * 962 *
919 * @rdev: radeon_device pointer 963 * @rdev: radeon_device pointer
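
The new helper isolates the userspace ABI bits from the R600 PTE bits the hardware consumes, and makes the rule explicit that snooping is only meaningful for system-memory pages. The same shape as a standalone function, with placeholder bit values standing in for the radeon ABI and R600_PTE_* definitions:

/* Sketch: ABI-to-hardware PTE flag translation; bit values are placeholders. */
#include <stdint.h>
#include <stdio.h>

#define VM_PAGE_VALID     (1u << 0)   /* userspace ABI bits (placeholder values) */
#define VM_PAGE_READABLE  (1u << 1)
#define VM_PAGE_WRITEABLE (1u << 2)
#define VM_PAGE_SYSTEM    (1u << 3)
#define VM_PAGE_SNOOPED   (1u << 4)

#define PTE_VALID     (1u << 0)       /* hardware PTE bits (placeholder values) */
#define PTE_SYSTEM    (1u << 1)
#define PTE_SNOOPED   (1u << 2)
#define PTE_READABLE  (1u << 5)
#define PTE_WRITEABLE (1u << 6)

static uint32_t vm_page_flags(uint32_t flags)
{
    uint32_t hw = 0;

    hw |= (flags & VM_PAGE_VALID)     ? PTE_VALID     : 0;
    hw |= (flags & VM_PAGE_READABLE)  ? PTE_READABLE  : 0;
    hw |= (flags & VM_PAGE_WRITEABLE) ? PTE_WRITEABLE : 0;
    if (flags & VM_PAGE_SYSTEM) {     /* snooping only applies to system pages */
        hw |= PTE_SYSTEM;
        hw |= (flags & VM_PAGE_SNOOPED) ? PTE_SNOOPED : 0;
    }
    return hw;
}

int main(void)
{
    printf("0x%x\n", vm_page_flags(VM_PAGE_VALID | VM_PAGE_SYSTEM | VM_PAGE_SNOOPED));
    return 0;
}
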
@@ -974,7 +1018,11 @@ retry:
974 if (count) { 1018 if (count) {
975 radeon_asic_vm_set_page(rdev, ib, last_pde, 1019 radeon_asic_vm_set_page(rdev, ib, last_pde,
976 last_pt, count, incr, 1020 last_pt, count, incr,
977 RADEON_VM_PAGE_VALID); 1021 R600_PTE_VALID);
1022
1023 count *= RADEON_VM_PTE_COUNT;
1024 radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
1025 count, 0, 0);
978 } 1026 }
979 1027
980 count = 1; 1028 count = 1;
@@ -987,8 +1035,11 @@ retry:
987 1035
988 if (count) { 1036 if (count) {
989 radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count, 1037 radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
990 incr, RADEON_VM_PAGE_VALID); 1038 incr, R600_PTE_VALID);
991 1039
1040 count *= RADEON_VM_PTE_COUNT;
1041 radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
1042 count, 0, 0);
992 } 1043 }
993 1044
994 return 0; 1045 return 0;
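
Because every page table linked by a new PDE must itself be zeroed, each PDE run is followed by a second set-page call over count * RADEON_VM_PTE_COUNT entries, and the IB reservation grows by npdes * 2 * RADEON_VM_PTE_COUNT dwords to cover it. A quick check of that budget, assuming the usual 512 PTEs per table:

/* Sketch: dword budget for clearing the page tables behind 'count' new PDEs. */
#include <stdio.h>

#define PTE_COUNT 512u   /* assumption: PTEs per page table */

int main(void)
{
    unsigned count = 8;                       /* PDEs written in one run */
    unsigned ptes  = count * PTE_COUNT;       /* entries the clear must cover */
    unsigned ndw   = count * 2 * PTE_COUNT;   /* reserved dwords, 2 per entry */

    printf("%u PDEs -> clear %u PTEs, reserve %u dw\n", count, ptes, ndw);
    return 0;
}
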
@@ -1082,7 +1133,6 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1082 struct radeon_bo *bo, 1133 struct radeon_bo *bo,
1083 struct ttm_mem_reg *mem) 1134 struct ttm_mem_reg *mem)
1084{ 1135{
1085 unsigned ridx = rdev->asic->vm.pt_ring_index;
1086 struct radeon_ib ib; 1136 struct radeon_ib ib;
1087 struct radeon_bo_va *bo_va; 1137 struct radeon_bo_va *bo_va;
1088 unsigned nptes, npdes, ndw; 1138 unsigned nptes, npdes, ndw;
@@ -1151,11 +1201,14 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1151 /* reserve space for pde addresses */ 1201 /* reserve space for pde addresses */
1152 ndw += npdes * 2; 1202 ndw += npdes * 2;
1153 1203
1204 /* reserve space for clearing new page tables */
1205 ndw += npdes * 2 * RADEON_VM_PTE_COUNT;
1206
1154 /* update too big for an IB */ 1207 /* update too big for an IB */
1155 if (ndw > 0xfffff) 1208 if (ndw > 0xfffff)
1156 return -ENOMEM; 1209 return -ENOMEM;
1157 1210
1158 r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4); 1211 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
1159 ib.length_dw = 0; 1212 ib.length_dw = 0;
1160 1213
1161 r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset); 1214 r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
@@ -1165,7 +1218,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1165 } 1218 }
1166 1219
1167 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, 1220 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
1168 addr, bo_va->flags); 1221 addr, radeon_vm_page_flags(bo_va->flags));
1169 1222
1170 radeon_ib_sync_to(&ib, vm->fence); 1223 radeon_ib_sync_to(&ib, vm->fence);
1171 r = radeon_ib_schedule(rdev, &ib, NULL); 1224 r = radeon_ib_schedule(rdev, &ib, NULL);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index dce99c8a5835..805c5e566b9a 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -29,13 +29,6 @@
29#include <drm/radeon_drm.h> 29#include <drm/radeon_drm.h>
30#include "radeon.h" 30#include "radeon.h"
31 31
32int radeon_gem_object_init(struct drm_gem_object *obj)
33{
34 BUG();
35
36 return 0;
37}
38
39void radeon_gem_object_free(struct drm_gem_object *gobj) 32void radeon_gem_object_free(struct drm_gem_object *gobj)
40{ 33{
41 struct radeon_bo *robj = gem_to_radeon_bo(gobj); 34 struct radeon_bo *robj = gem_to_radeon_bo(gobj);
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index c180df8e84db..bdb0f93e73bc 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -418,7 +418,7 @@ long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long
418 if (nr < DRM_COMMAND_BASE) 418 if (nr < DRM_COMMAND_BASE)
419 return drm_compat_ioctl(filp, cmd, arg); 419 return drm_compat_ioctl(filp, cmd, arg);
420 420
421 ret = drm_ioctl(filp, cmd, arg); 421 ret = radeon_drm_ioctl(filp, cmd, arg);
422 422
423 return ret; 423 return ret;
424} 424}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index cc9e8482cf30..ec6240b00469 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -32,6 +32,8 @@
32#include "radeon.h" 32#include "radeon.h"
33#include "atom.h" 33#include "atom.h"
34 34
35#include <linux/pm_runtime.h>
36
35#define RADEON_WAIT_IDLE_TIMEOUT 200 37#define RADEON_WAIT_IDLE_TIMEOUT 200
36 38
37/** 39/**
@@ -47,8 +49,12 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
47{ 49{
48 struct drm_device *dev = (struct drm_device *) arg; 50 struct drm_device *dev = (struct drm_device *) arg;
49 struct radeon_device *rdev = dev->dev_private; 51 struct radeon_device *rdev = dev->dev_private;
52 irqreturn_t ret;
50 53
51 return radeon_irq_process(rdev); 54 ret = radeon_irq_process(rdev);
55 if (ret == IRQ_HANDLED)
56 pm_runtime_mark_last_busy(dev->dev);
57 return ret;
52} 58}
53 59
54/* 60/*
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 61580ddc4eb2..bb8710531a1b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -32,7 +32,7 @@
32 32
33#include <linux/vga_switcheroo.h> 33#include <linux/vga_switcheroo.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35 35#include <linux/pm_runtime.h>
36/** 36/**
37 * radeon_driver_unload_kms - Main unload function for KMS. 37 * radeon_driver_unload_kms - Main unload function for KMS.
38 * 38 *
@@ -50,9 +50,14 @@ int radeon_driver_unload_kms(struct drm_device *dev)
50 50
51 if (rdev == NULL) 51 if (rdev == NULL)
52 return 0; 52 return 0;
53
53 if (rdev->rmmio == NULL) 54 if (rdev->rmmio == NULL)
54 goto done_free; 55 goto done_free;
56
57 pm_runtime_get_sync(dev->dev);
58
55 radeon_acpi_fini(rdev); 59 radeon_acpi_fini(rdev);
60
56 radeon_modeset_fini(rdev); 61 radeon_modeset_fini(rdev);
57 radeon_device_fini(rdev); 62 radeon_device_fini(rdev);
58 63
@@ -125,9 +130,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
125 "Error during ACPI methods call\n"); 130 "Error during ACPI methods call\n");
126 } 131 }
127 132
133 if (radeon_runtime_pm != 0) {
134 pm_runtime_use_autosuspend(dev->dev);
135 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
136 pm_runtime_set_active(dev->dev);
137 pm_runtime_allow(dev->dev);
138 pm_runtime_mark_last_busy(dev->dev);
139 pm_runtime_put_autosuspend(dev->dev);
140 }
141
128out: 142out:
129 if (r) 143 if (r)
130 radeon_driver_unload_kms(dev); 144 radeon_driver_unload_kms(dev);
145
146
131 return r; 147 return r;
132} 148}
133 149
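
The load path arms runtime PM only when the radeon_runtime_pm option allows it: enable autosuspend with a five-second delay, mark the device active, then immediately drop the initial reference so an idle GPU can power down shortly after boot. The ordering matters; a stubbed sketch:

/* Sketch: the runtime-PM arming sequence used at driver load; stubs only. */
#include <stdio.h>

static void use_autosuspend(void) { puts("enable autosuspend mode"); }
static void set_delay(int ms)     { printf("autosuspend delay %d ms\n", ms); }
static void set_active(void)      { puts("state = RPM_ACTIVE"); }
static void allow(void)           { puts("permit runtime PM for this device"); }
static void mark_last_busy(void)  { puts("start the idle window"); }
static void put_autosuspend(void) { puts("drop boot reference"); }

int main(void)
{
    int runtime_pm_enabled = -1;   /* mirrors the radeon_runtime_pm option */

    if (runtime_pm_enabled != 0) {
        use_autosuspend();
        set_delay(5000);
        set_active();
        allow();
        mark_last_busy();
        put_autosuspend();         /* GPU may suspend 5 s after load if idle */
    }
    return 0;
}
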
@@ -191,7 +207,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
191 207
192 switch (info->request) { 208 switch (info->request) {
193 case RADEON_INFO_DEVICE_ID: 209 case RADEON_INFO_DEVICE_ID:
194 *value = dev->pci_device; 210 *value = dev->pdev->device;
195 break; 211 break;
196 case RADEON_INFO_NUM_GB_PIPES: 212 case RADEON_INFO_NUM_GB_PIPES:
197 *value = rdev->num_gb_pipes; 213 *value = rdev->num_gb_pipes;
@@ -475,9 +491,14 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
475int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) 491int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
476{ 492{
477 struct radeon_device *rdev = dev->dev_private; 493 struct radeon_device *rdev = dev->dev_private;
494 int r;
478 495
479 file_priv->driver_priv = NULL; 496 file_priv->driver_priv = NULL;
480 497
498 r = pm_runtime_get_sync(dev->dev);
499 if (r < 0)
500 return r;
501
481 /* new gpu have virtual address space support */ 502 /* new gpu have virtual address space support */
482 if (rdev->family >= CHIP_CAYMAN) { 503 if (rdev->family >= CHIP_CAYMAN) {
483 struct radeon_fpriv *fpriv; 504 struct radeon_fpriv *fpriv;
@@ -506,6 +527,9 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
506 527
507 file_priv->driver_priv = fpriv; 528 file_priv->driver_priv = fpriv;
508 } 529 }
530
531 pm_runtime_mark_last_busy(dev->dev);
532 pm_runtime_put_autosuspend(dev->dev);
509 return 0; 533 return 0;
510} 534}
511 535
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 7cb178a34a0f..0c7b8c66301b 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1056,6 +1056,26 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
1056 } 1056 }
1057} 1057}
1058 1058
1059static void radeon_crtc_disable(struct drm_crtc *crtc)
1060{
1061 radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1062 if (crtc->fb) {
1063 int r;
1064 struct radeon_framebuffer *radeon_fb;
1065 struct radeon_bo *rbo;
1066
1067 radeon_fb = to_radeon_framebuffer(crtc->fb);
1068 rbo = gem_to_radeon_bo(radeon_fb->obj);
1069 r = radeon_bo_reserve(rbo, false);
1070 if (unlikely(r))
1071 DRM_ERROR("failed to reserve rbo before unpin\n");
1072 else {
1073 radeon_bo_unpin(rbo);
1074 radeon_bo_unreserve(rbo);
1075 }
1076 }
1077}
1078
1059static const struct drm_crtc_helper_funcs legacy_helper_funcs = { 1079static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
1060 .dpms = radeon_crtc_dpms, 1080 .dpms = radeon_crtc_dpms,
1061 .mode_fixup = radeon_crtc_mode_fixup, 1081 .mode_fixup = radeon_crtc_mode_fixup,
@@ -1065,6 +1085,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
1065 .prepare = radeon_crtc_prepare, 1085 .prepare = radeon_crtc_prepare,
1066 .commit = radeon_crtc_commit, 1086 .commit = radeon_crtc_commit,
1067 .load_lut = radeon_crtc_load_lut, 1087 .load_lut = radeon_crtc_load_lut,
1088 .disable = radeon_crtc_disable
1068}; 1089};
1069 1090
1070 1091
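
The new .disable hook finally unpins the scanout buffer when a CRTC is shut off, and unpinning requires holding the buffer's reservation, hence the reserve/unpin/unreserve bracket. A sketch of that bracket with the BO API stubbed:

/* Sketch: the reserve/unpin/unreserve bracket; the BO API is stubbed. */
#include <stdio.h>

struct bo { int pin_count; int reserved; };

static int  bo_reserve(struct bo *bo)   { bo->reserved = 1; return 0; }
static void bo_unreserve(struct bo *bo) { bo->reserved = 0; }
static void bo_unpin(struct bo *bo)     { if (bo->pin_count) bo->pin_count--; }

static void crtc_disable(struct bo *scanout)
{
    if (!scanout)
        return;                         /* no framebuffer bound */

    if (bo_reserve(scanout)) {          /* must hold the reservation to unpin */
        puts("failed to reserve scanout BO before unpin");
        return;
    }
    bo_unpin(scanout);                  /* balance the pin taken at modeset */
    bo_unreserve(scanout);
}

int main(void)
{
    struct bo fb = { .pin_count = 1, .reserved = 0 };
    crtc_disable(&fb);
    printf("pin_count=%d\n", fb.pin_count);
    return 0;
}
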
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 62cd512f5c8d..c89971d904c3 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -392,7 +392,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
392 props.type = BACKLIGHT_RAW; 392 props.type = BACKLIGHT_RAW;
393 snprintf(bl_name, sizeof(bl_name), 393 snprintf(bl_name, sizeof(bl_name),
394 "radeon_bl%d", dev->primary->index); 394 "radeon_bl%d", dev->primary->index);
395 bd = backlight_device_register(bl_name, &drm_connector->kdev, 395 bd = backlight_device_register(bl_name, drm_connector->kdev,
396 pdata, &radeon_backlight_ops, &props); 396 pdata, &radeon_backlight_ops, &props);
397 if (IS_ERR(bd)) { 397 if (IS_ERR(bd)) {
398 DRM_ERROR("Backlight registration failed\n"); 398 DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ef63d3f00b2f..3f0dd664af90 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -249,6 +249,8 @@ struct radeon_mode_info {
249 struct drm_property *underscan_vborder_property; 249 struct drm_property *underscan_vborder_property;
250 /* audio */ 250 /* audio */
251 struct drm_property *audio_property; 251 struct drm_property *audio_property;
252 /* FMT dithering */
253 struct drm_property *dither_property;
252 /* hardcoded DFP edid from BIOS */ 254 /* hardcoded DFP edid from BIOS */
253 struct edid *bios_hardcoded_edid; 255 struct edid *bios_hardcoded_edid;
254 int bios_hardcoded_edid_size; 256 int bios_hardcoded_edid_size;
@@ -479,6 +481,11 @@ enum radeon_connector_audio {
479 RADEON_AUDIO_AUTO = 2 481 RADEON_AUDIO_AUTO = 2
480}; 482};
481 483
484enum radeon_connector_dither {
485 RADEON_FMT_DITHER_DISABLE = 0,
486 RADEON_FMT_DITHER_ENABLE = 1,
487};
488
482struct radeon_connector { 489struct radeon_connector {
483 struct drm_connector base; 490 struct drm_connector base;
484 uint32_t connector_id; 491 uint32_t connector_id;
@@ -498,6 +505,7 @@ struct radeon_connector {
498 struct radeon_router router; 505 struct radeon_router router;
499 struct radeon_i2c_chan *router_bus; 506 struct radeon_i2c_chan *router_bus;
500 enum radeon_connector_audio audio; 507 enum radeon_connector_audio audio;
508 enum radeon_connector_dither dither;
501}; 509};
502 510
503struct radeon_framebuffer { 511struct radeon_framebuffer {
@@ -758,7 +766,8 @@ extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
758 int x, int y); 766 int x, int y);
759 767
760extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, 768extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
761 int *vpos, int *hpos); 769 int *vpos, int *hpos, ktime_t *stime,
770 ktime_t *etime);
762 771
763extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); 772extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
764extern struct edid * 773extern struct edid *
@@ -850,6 +859,12 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
850 struct drm_display_mode *mode, 859 struct drm_display_mode *mode,
851 struct drm_display_mode *adjusted_mode); 860 struct drm_display_mode *adjusted_mode);
852 861
862/* fmt blocks */
863void avivo_program_fmt(struct drm_encoder *encoder);
864void dce3_program_fmt(struct drm_encoder *encoder);
865void dce4_program_fmt(struct drm_encoder *encoder);
866void dce8_program_fmt(struct drm_encoder *encoder);
867
853/* fbdev layer */ 868/* fbdev layer */
854int radeon_fbdev_init(struct radeon_device *rdev); 869int radeon_fbdev_init(struct radeon_device *rdev);
855void radeon_fbdev_fini(struct radeon_device *rdev); 870void radeon_fbdev_fini(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4f6b7fc7ad3c..866ace070b91 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -508,17 +508,21 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
508 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 508 } else if (strncmp("auto", buf, strlen("auto")) == 0) {
509 level = RADEON_DPM_FORCED_LEVEL_AUTO; 509 level = RADEON_DPM_FORCED_LEVEL_AUTO;
510 } else { 510 } else {
511 mutex_unlock(&rdev->pm.mutex);
512 count = -EINVAL; 511 count = -EINVAL;
513 goto fail; 512 goto fail;
514 } 513 }
515 if (rdev->asic->dpm.force_performance_level) { 514 if (rdev->asic->dpm.force_performance_level) {
515 if (rdev->pm.dpm.thermal_active) {
516 count = -EINVAL;
517 goto fail;
518 }
516 ret = radeon_dpm_force_performance_level(rdev, level); 519 ret = radeon_dpm_force_performance_level(rdev, level);
517 if (ret) 520 if (ret)
518 count = -EINVAL; 521 count = -EINVAL;
519 } 522 }
520 mutex_unlock(&rdev->pm.mutex);
521fail: 523fail:
524 mutex_unlock(&rdev->pm.mutex);
525
522 return count; 526 return count;
523} 527}
524 528
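
The first hunk above fixes an unlock imbalance: the old code dropped pm.mutex on the bad-input path and then again at the end of the function. Moving the single mutex_unlock below the fail label gives every path, including the new thermal-active rejection, exactly one unlock. The classic shape of that fix:

/* Sketch: single-exit unlock via a fail label; the mutex is stubbed. */
#include <stdio.h>
#include <string.h>

static int thermal_active = 1;

static void mutex_lock(void)   { puts("lock pm.mutex"); }
static void mutex_unlock(void) { puts("unlock pm.mutex"); }

static int store_level(const char *buf)
{
    int ret = 0;

    mutex_lock();
    if (strcmp(buf, "low") && strcmp(buf, "high") && strcmp(buf, "auto")) {
        ret = -22;                   /* -EINVAL: unknown level string */
        goto fail;
    }
    if (thermal_active) {
        ret = -22;                   /* refuse overrides while thermally throttled */
        goto fail;
    }
    printf("forcing level %s\n", buf);
fail:
    mutex_unlock();                  /* exactly one unlock on every path */
    return ret;
}

int main(void)
{
    printf("%d %d\n", store_level("bogus"), store_level("auto"));
    return 0;
}
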
@@ -881,11 +885,12 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
881 } 885 }
882 } 886 }
883 887
884 printk("switching from power state:\n"); 888 if (radeon_dpm == 1) {
885 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps); 889 printk("switching from power state:\n");
886 printk("switching to power state:\n"); 890 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
887 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps); 891 printk("switching to power state:\n");
888 892 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
893 }
889 mutex_lock(&rdev->ddev->struct_mutex); 894 mutex_lock(&rdev->ddev->struct_mutex);
890 down_write(&rdev->pm.mclk_lock); 895 down_write(&rdev->pm.mclk_lock);
891 mutex_lock(&rdev->ring_lock); 896 mutex_lock(&rdev->ring_lock);
@@ -918,12 +923,16 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
918 radeon_dpm_post_set_power_state(rdev); 923 radeon_dpm_post_set_power_state(rdev);
919 924
920 if (rdev->asic->dpm.force_performance_level) { 925 if (rdev->asic->dpm.force_performance_level) {
921 if (rdev->pm.dpm.thermal_active) 926 if (rdev->pm.dpm.thermal_active) {
927 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
922 /* force low perf level for thermal */ 928 /* force low perf level for thermal */
923 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW); 929 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
924 else 930 /* save the user's level */
925 /* otherwise, enable auto */ 931 rdev->pm.dpm.forced_level = level;
926 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); 932 } else {
933 /* otherwise, user selected level */
934 radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
935 }
927 } 936 }
928 937
929done: 938done:
@@ -1179,7 +1188,8 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
1179 mutex_lock(&rdev->pm.mutex); 1188 mutex_lock(&rdev->pm.mutex);
1180 radeon_dpm_init(rdev); 1189 radeon_dpm_init(rdev);
1181 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 1190 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1182 radeon_dpm_print_power_states(rdev); 1191 if (radeon_dpm == 1)
1192 radeon_dpm_print_power_states(rdev);
1183 radeon_dpm_setup_asic(rdev); 1193 radeon_dpm_setup_asic(rdev);
1184 ret = radeon_dpm_enable(rdev); 1194 ret = radeon_dpm_enable(rdev);
1185 mutex_unlock(&rdev->pm.mutex); 1195 mutex_unlock(&rdev->pm.mutex);
@@ -1241,6 +1251,24 @@ int radeon_pm_init(struct radeon_device *rdev)
1241 case CHIP_RV670: 1251 case CHIP_RV670:
1242 case CHIP_RS780: 1252 case CHIP_RS780:
1243 case CHIP_RS880: 1253 case CHIP_RS880:
1254 case CHIP_CAYMAN:
1255 case CHIP_ARUBA:
1256 case CHIP_BONAIRE:
1257 case CHIP_KABINI:
1258 case CHIP_KAVERI:
1259 case CHIP_HAWAII:
1260 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1261 if (!rdev->rlc_fw)
1262 rdev->pm.pm_method = PM_METHOD_PROFILE;
1263 else if ((rdev->family >= CHIP_RV770) &&
1264 (!(rdev->flags & RADEON_IS_IGP)) &&
1265 (!rdev->smc_fw))
1266 rdev->pm.pm_method = PM_METHOD_PROFILE;
1267 else if (radeon_dpm == 1)
1268 rdev->pm.pm_method = PM_METHOD_DPM;
1269 else
1270 rdev->pm.pm_method = PM_METHOD_PROFILE;
1271 break;
1244 case CHIP_RV770: 1272 case CHIP_RV770:
1245 case CHIP_RV730: 1273 case CHIP_RV730:
1246 case CHIP_RV710: 1274 case CHIP_RV710:
@@ -1256,16 +1284,11 @@ int radeon_pm_init(struct radeon_device *rdev)
1256 case CHIP_BARTS: 1284 case CHIP_BARTS:
1257 case CHIP_TURKS: 1285 case CHIP_TURKS:
1258 case CHIP_CAICOS: 1286 case CHIP_CAICOS:
1259 case CHIP_CAYMAN:
1260 case CHIP_ARUBA:
1261 case CHIP_TAHITI: 1287 case CHIP_TAHITI:
1262 case CHIP_PITCAIRN: 1288 case CHIP_PITCAIRN:
1263 case CHIP_VERDE: 1289 case CHIP_VERDE:
1264 case CHIP_OLAND: 1290 case CHIP_OLAND:
1265 case CHIP_HAINAN: 1291 case CHIP_HAINAN:
1266 case CHIP_BONAIRE:
1267 case CHIP_KABINI:
1268 case CHIP_KAVERI:
1269 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1292 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1270 if (!rdev->rlc_fw) 1293 if (!rdev->rlc_fw)
1271 rdev->pm.pm_method = PM_METHOD_PROFILE; 1294 rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1273,10 +1296,10 @@ int radeon_pm_init(struct radeon_device *rdev)
1273 (!(rdev->flags & RADEON_IS_IGP)) && 1296 (!(rdev->flags & RADEON_IS_IGP)) &&
1274 (!rdev->smc_fw)) 1297 (!rdev->smc_fw))
1275 rdev->pm.pm_method = PM_METHOD_PROFILE; 1298 rdev->pm.pm_method = PM_METHOD_PROFILE;
1276 else if (radeon_dpm == 1) 1299 else if (radeon_dpm == 0)
1277 rdev->pm.pm_method = PM_METHOD_DPM;
1278 else
1279 rdev->pm.pm_method = PM_METHOD_PROFILE; 1300 rdev->pm.pm_method = PM_METHOD_PROFILE;
1301 else
1302 rdev->pm.pm_method = PM_METHOD_DPM;
1280 break; 1303 break;
1281 default: 1304 default:
1282 /* default to profile method */ 1305 /* default to profile method */
@@ -1468,7 +1491,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1468 */ 1491 */
1469 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { 1492 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1470 if (rdev->pm.active_crtcs & (1 << crtc)) { 1493 if (rdev->pm.active_crtcs & (1 << crtc)) {
1471 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos); 1494 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos, NULL, NULL);
1472 if ((vbl_status & DRM_SCANOUTPOS_VALID) && 1495 if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
1473 !(vbl_status & DRM_SCANOUTPOS_INVBL)) 1496 !(vbl_status & DRM_SCANOUTPOS_INVBL))
1474 in_vbl = false; 1497 in_vbl = false;
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index f7e367815964..811bca691b36 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -47,6 +47,30 @@ TRACE_EVENT(radeon_cs,
47 __entry->fences) 47 __entry->fences)
48); 48);
49 49
50TRACE_EVENT(radeon_vm_set_page,
51 TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
52 uint32_t incr, uint32_t flags),
53 TP_ARGS(pe, addr, count, incr, flags),
54 TP_STRUCT__entry(
55 __field(u64, pe)
56 __field(u64, addr)
57 __field(u32, count)
58 __field(u32, incr)
59 __field(u32, flags)
60 ),
61
62 TP_fast_assign(
63 __entry->pe = pe;
64 __entry->addr = addr;
65 __entry->count = count;
66 __entry->incr = incr;
67 __entry->flags = flags;
68 ),
69 TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
70 __entry->pe, __entry->addr, __entry->incr,
71 __entry->flags, __entry->count)
72);
73
50DECLARE_EVENT_CLASS(radeon_fence_request, 74DECLARE_EVENT_CLASS(radeon_fence_request,
51 75
52 TP_PROTO(struct drm_device *dev, u32 seqno), 76 TP_PROTO(struct drm_device *dev, u32 seqno),
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index 33858364fe89..a77cd274dfc3 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -59,6 +59,7 @@
59#define SI_MC_UCODE_SIZE 7769 59#define SI_MC_UCODE_SIZE 7769
60#define OLAND_MC_UCODE_SIZE 7863 60#define OLAND_MC_UCODE_SIZE 7863
61#define CIK_MC_UCODE_SIZE 7866 61#define CIK_MC_UCODE_SIZE 7866
62#define HAWAII_MC_UCODE_SIZE 7933
62 63
63/* SDMA */ 64/* SDMA */
64#define CIK_SDMA_UCODE_SIZE 1050 65#define CIK_SDMA_UCODE_SIZE 1050
@@ -143,4 +144,7 @@
143#define BONAIRE_SMC_UCODE_START 0x20000 144#define BONAIRE_SMC_UCODE_START 0x20000
144#define BONAIRE_SMC_UCODE_SIZE 0x1FDEC 145#define BONAIRE_SMC_UCODE_SIZE 0x1FDEC
145 146
147#define HAWAII_SMC_UCODE_START 0x20000
148#define HAWAII_SMC_UCODE_SIZE 0x1FDEC
149
146#endif 150#endif
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 308eff5be1b4..373d088bac66 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -97,6 +97,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
97 case CHIP_BONAIRE: 97 case CHIP_BONAIRE:
98 case CHIP_KABINI: 98 case CHIP_KABINI:
99 case CHIP_KAVERI: 99 case CHIP_KAVERI:
100 case CHIP_HAWAII:
100 fw_name = FIRMWARE_BONAIRE; 101 fw_name = FIRMWARE_BONAIRE;
101 break; 102 break;
102 103
@@ -240,6 +241,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
240 if (handle != 0 && rdev->uvd.filp[i] == filp) { 241 if (handle != 0 && rdev->uvd.filp[i] == filp) {
241 struct radeon_fence *fence; 242 struct radeon_fence *fence;
242 243
244 radeon_uvd_note_usage(rdev);
245
243 r = radeon_uvd_get_destroy_msg(rdev, 246 r = radeon_uvd_get_destroy_msg(rdev,
244 R600_RING_TYPE_UVD_INDEX, handle, &fence); 247 R600_RING_TYPE_UVD_INDEX, handle, &fence);
245 if (r) { 248 if (r) {
@@ -620,7 +623,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
620 if (r) 623 if (r)
621 goto err; 624 goto err;
622 625
623 r = radeon_ib_get(rdev, ring, &ib, NULL, 16); 626 r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
624 if (r) 627 if (r)
625 goto err; 628 goto err;
626 629
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6acba8017b9a..76cc8d3aafec 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -153,6 +153,70 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
153 return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; 153 return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
154} 154}
155 155
156void avivo_program_fmt(struct drm_encoder *encoder)
157{
158 struct drm_device *dev = encoder->dev;
159 struct radeon_device *rdev = dev->dev_private;
160 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
161 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
162 int bpc = 0;
163 u32 tmp = 0;
164 enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
165
166 if (connector) {
167 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
168 bpc = radeon_get_monitor_bpc(connector);
169 dither = radeon_connector->dither;
170 }
171
172 /* LVDS FMT is set up by atom */
173 if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
174 return;
175
176 if (bpc == 0)
177 return;
178
179 switch (bpc) {
180 case 6:
181 if (dither == RADEON_FMT_DITHER_ENABLE)
182 /* XXX sort out optimal dither settings */
183 tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
184 else
185 tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN;
186 break;
187 case 8:
188 if (dither == RADEON_FMT_DITHER_ENABLE)
189 /* XXX sort out optimal dither settings */
190 tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN |
191 AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH);
192 else
193 tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN |
194 AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH);
195 break;
196 case 10:
197 default:
198 /* not needed */
199 break;
200 }
201
202 switch (radeon_encoder->encoder_id) {
203 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
204 WREG32(AVIVO_TMDSA_BIT_DEPTH_CONTROL, tmp);
205 break;
206 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
207 WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, tmp);
208 break;
209 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
210 WREG32(AVIVO_DVOA_BIT_DEPTH_CONTROL, tmp);
211 break;
212 case ENCODER_OBJECT_ID_INTERNAL_DDI:
213 WREG32(AVIVO_DDIA_BIT_DEPTH_CONTROL, tmp);
214 break;
215 default:
216 break;
217 }
218}
219
156void rs600_pm_misc(struct radeon_device *rdev) 220void rs600_pm_misc(struct radeon_device *rdev)
157{ 221{
158 int requested_index = rdev->pm.requested_power_state_index; 222 int requested_index = rdev->pm.requested_power_state_index;
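
avivo_program_fmt picks between truncation and spatial dithering from the sink's bpc and the per-connector dither property, then writes one of four per-encoder bit-depth-control registers; 10 bpc (or unknown bpc) leaves the pipe alone. A sketch of the bit selection, with placeholder bit positions and the register write stubbed:

/* Sketch: choosing FMT control bits from (bpc, dither); bits are placeholders. */
#include <stdint.h>
#include <stdio.h>

#define SPATIAL_DITHER_EN    (1u << 0)   /* placeholder bit positions */
#define SPATIAL_DITHER_DEPTH (1u << 1)
#define TRUNCATE_EN          (1u << 2)
#define TRUNCATE_DEPTH       (1u << 3)

static uint32_t fmt_bits(int bpc, int dither)
{
    switch (bpc) {
    case 6:  /* 18-bit panel fed from a deeper pipe */
        return dither ? SPATIAL_DITHER_EN : TRUNCATE_EN;
    case 8:  /* 24-bit panel fed from a deeper pipe */
        return dither ? (SPATIAL_DITHER_EN | SPATIAL_DITHER_DEPTH)
                      : (TRUNCATE_EN | TRUNCATE_DEPTH);
    case 10:
    default:
        return 0;                        /* full depth: no reduction needed */
    }
}

int main(void)
{
    printf("6bpc+dither -> 0x%x, 8bpc trunc -> 0x%x\n",
           fmt_bits(6, 1), fmt_bits(8, 0));
    return 0;
}
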
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1447d794c22a..1c560629575a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -345,9 +345,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
345 if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full && 345 if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
346 rdev->pm.sideport_bandwidth.full) 346 rdev->pm.sideport_bandwidth.full)
347 max_bandwidth = rdev->pm.sideport_bandwidth; 347 max_bandwidth = rdev->pm.sideport_bandwidth;
348 read_delay_latency.full = dfixed_const(370 * 800 * 1000); 348 read_delay_latency.full = dfixed_const(370 * 800);
349 read_delay_latency.full = dfixed_div(read_delay_latency, 349 a.full = dfixed_const(1000);
350 rdev->pm.igp_sideport_mclk); 350 b.full = dfixed_div(rdev->pm.igp_sideport_mclk, a);
351 read_delay_latency.full = dfixed_div(read_delay_latency, b);
352 read_delay_latency.full = dfixed_mul(read_delay_latency, a);
351 } else { 353 } else {
352 if (max_bandwidth.full > rdev->pm.k8_bandwidth.full && 354 if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
353 rdev->pm.k8_bandwidth.full) 355 rdev->pm.k8_bandwidth.full)
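
The latency rework above is an overflow fix: the drm 20.12 fixed-point helpers keep the value in a 32-bit word, so dfixed_const(370 * 800 * 1000) would shift 296,000,000 left by twelve bits and wrap. Keeping the constant at 370 * 800 and folding the factor of 1000 into the divisor, by scaling the sideport memory clock first, stays in range. A standalone check, assuming the 20.12 layout:

/* Sketch: why 370*800*1000 cannot be represented in 20.12 fixed point. */
#include <stdint.h>
#include <stdio.h>

static uint32_t dfixed_const(uint32_t a) { return a << 12; }  /* 20.12 layout */

int main(void)
{
    uint64_t wanted = (uint64_t)370 * 800 * 1000 << 12;  /* ~1.2e12: needs 41 bits */
    uint32_t stored = dfixed_const(370u * 800 * 1000);   /* wraps modulo 2^32 */
    uint32_t ok     = dfixed_const(370u * 800);          /* 296000<<12 fits: ~1.2e9 */

    printf("wanted=%llu stored=%u ok=%u\n",
           (unsigned long long)wanted, stored, ok);
    return 0;
}
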
@@ -488,14 +490,10 @@ static void rs690_compute_mode_priority(struct radeon_device *rdev,
488 } 490 }
489 if (wm0->priority_mark.full > priority_mark02.full) 491 if (wm0->priority_mark.full > priority_mark02.full)
490 priority_mark02.full = wm0->priority_mark.full; 492 priority_mark02.full = wm0->priority_mark.full;
491 if (dfixed_trunc(priority_mark02) < 0)
492 priority_mark02.full = 0;
493 if (wm0->priority_mark_max.full > priority_mark02.full) 493 if (wm0->priority_mark_max.full > priority_mark02.full)
494 priority_mark02.full = wm0->priority_mark_max.full; 494 priority_mark02.full = wm0->priority_mark_max.full;
495 if (wm1->priority_mark.full > priority_mark12.full) 495 if (wm1->priority_mark.full > priority_mark12.full)
496 priority_mark12.full = wm1->priority_mark.full; 496 priority_mark12.full = wm1->priority_mark.full;
497 if (dfixed_trunc(priority_mark12) < 0)
498 priority_mark12.full = 0;
499 if (wm1->priority_mark_max.full > priority_mark12.full) 497 if (wm1->priority_mark_max.full > priority_mark12.full)
500 priority_mark12.full = wm1->priority_mark_max.full; 498 priority_mark12.full = wm1->priority_mark_max.full;
501 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 499 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -526,8 +524,6 @@ static void rs690_compute_mode_priority(struct radeon_device *rdev,
526 } 524 }
527 if (wm0->priority_mark.full > priority_mark02.full) 525 if (wm0->priority_mark.full > priority_mark02.full)
528 priority_mark02.full = wm0->priority_mark.full; 526 priority_mark02.full = wm0->priority_mark.full;
529 if (dfixed_trunc(priority_mark02) < 0)
530 priority_mark02.full = 0;
531 if (wm0->priority_mark_max.full > priority_mark02.full) 527 if (wm0->priority_mark_max.full > priority_mark02.full)
532 priority_mark02.full = wm0->priority_mark_max.full; 528 priority_mark02.full = wm0->priority_mark_max.full;
533 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 529 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -555,8 +551,6 @@ static void rs690_compute_mode_priority(struct radeon_device *rdev,
555 } 551 }
556 if (wm1->priority_mark.full > priority_mark12.full) 552 if (wm1->priority_mark.full > priority_mark12.full)
557 priority_mark12.full = wm1->priority_mark.full; 553 priority_mark12.full = wm1->priority_mark.full;
558 if (dfixed_trunc(priority_mark12) < 0)
559 priority_mark12.full = 0;
560 if (wm1->priority_mark_max.full > priority_mark12.full) 554 if (wm1->priority_mark_max.full > priority_mark12.full)
561 priority_mark12.full = wm1->priority_mark_max.full; 555 priority_mark12.full = wm1->priority_mark_max.full;
562 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); 556 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 873eb4b193b4..5d1c316115ef 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -1155,14 +1155,10 @@ static void rv515_compute_mode_priority(struct radeon_device *rdev,
1155 } 1155 }
1156 if (wm0->priority_mark.full > priority_mark02.full) 1156 if (wm0->priority_mark.full > priority_mark02.full)
1157 priority_mark02.full = wm0->priority_mark.full; 1157 priority_mark02.full = wm0->priority_mark.full;
1158 if (dfixed_trunc(priority_mark02) < 0)
1159 priority_mark02.full = 0;
1160 if (wm0->priority_mark_max.full > priority_mark02.full) 1158 if (wm0->priority_mark_max.full > priority_mark02.full)
1161 priority_mark02.full = wm0->priority_mark_max.full; 1159 priority_mark02.full = wm0->priority_mark_max.full;
1162 if (wm1->priority_mark.full > priority_mark12.full) 1160 if (wm1->priority_mark.full > priority_mark12.full)
1163 priority_mark12.full = wm1->priority_mark.full; 1161 priority_mark12.full = wm1->priority_mark.full;
1164 if (dfixed_trunc(priority_mark12) < 0)
1165 priority_mark12.full = 0;
1166 if (wm1->priority_mark_max.full > priority_mark12.full) 1162 if (wm1->priority_mark_max.full > priority_mark12.full)
1167 priority_mark12.full = wm1->priority_mark_max.full; 1163 priority_mark12.full = wm1->priority_mark_max.full;
1168 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 1164 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -1193,8 +1189,6 @@ static void rv515_compute_mode_priority(struct radeon_device *rdev,
1193 } 1189 }
1194 if (wm0->priority_mark.full > priority_mark02.full) 1190 if (wm0->priority_mark.full > priority_mark02.full)
1195 priority_mark02.full = wm0->priority_mark.full; 1191 priority_mark02.full = wm0->priority_mark.full;
1196 if (dfixed_trunc(priority_mark02) < 0)
1197 priority_mark02.full = 0;
1198 if (wm0->priority_mark_max.full > priority_mark02.full) 1192 if (wm0->priority_mark_max.full > priority_mark02.full)
1199 priority_mark02.full = wm0->priority_mark_max.full; 1193 priority_mark02.full = wm0->priority_mark_max.full;
1200 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 1194 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -1222,8 +1216,6 @@ static void rv515_compute_mode_priority(struct radeon_device *rdev,
1222 } 1216 }
1223 if (wm1->priority_mark.full > priority_mark12.full) 1217 if (wm1->priority_mark.full > priority_mark12.full)
1224 priority_mark12.full = wm1->priority_mark.full; 1218 priority_mark12.full = wm1->priority_mark.full;
1225 if (dfixed_trunc(priority_mark12) < 0)
1226 priority_mark12.full = 0;
1227 if (wm1->priority_mark_max.full > priority_mark12.full) 1219 if (wm1->priority_mark_max.full > priority_mark12.full)
1228 priority_mark12.full = wm1->priority_mark_max.full; 1220 priority_mark12.full = wm1->priority_mark_max.full;
1229 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); 1221 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
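The dropped "if (dfixed_trunc(...) < 0)" tests in the rs690 and rv515 hunks were dead code: radeon's fixed20_12 type keeps the value in an unsigned 32-bit "full" member, and dfixed_trunc() is a plain right shift of that word, so the result can never be negative. A minimal userspace sketch of the surviving clamp sequence, mirroring the drm_fixed.h names (the mark values are made up):

#include <stdint.h>
#include <stdio.h>

typedef union { uint32_t full; } fixed20_12;   /* 20 integer bits, 12 fraction bits */
#define dfixed_trunc(A)  ((A).full >> 12)      /* unsigned shift: never negative */
#define dfixed_const(A)  ((uint32_t)(A) << 12)

int main(void)
{
	fixed20_12 mark = { .full = dfixed_const(3) };      /* hypothetical priority mark */
	fixed20_12 mark_max = { .full = dfixed_const(10) };

	/* the clamp the driver keeps: raise the mark to at least the maximum mark */
	if (mark_max.full > mark.full)
		mark.full = mark_max.full;

	/* dfixed_trunc() yields a uint32_t, so the removed "< 0" test could never fire */
	printf("priority cnt = %u\n", dfixed_trunc(mark));
	return 0;
}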
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 5811d277a36a..26633a025252 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -407,9 +407,9 @@ static void rv6xx_enable_engine_feedback_and_reference_sync(struct radeon_device
407 WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC); 407 WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
408} 408}
409 409
410static u64 rv6xx_clocks_per_unit(u32 unit) 410static u32 rv6xx_clocks_per_unit(u32 unit)
411{ 411{
412 u64 tmp = 1 << (2 * unit); 412 u32 tmp = 1 << (2 * unit);
413 413
414 return tmp; 414 return tmp;
415} 415}
@@ -417,7 +417,7 @@ static u64 rv6xx_clocks_per_unit(u32 unit)
417static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev, 417static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
418 u32 unscaled_count, u32 unit) 418 u32 unscaled_count, u32 unit)
419{ 419{
420 u32 count_per_unit = (u32)rv6xx_clocks_per_unit(unit); 420 u32 count_per_unit = rv6xx_clocks_per_unit(unit);
421 421
422 return (unscaled_count + count_per_unit - 1) / count_per_unit; 422 return (unscaled_count + count_per_unit - 1) / count_per_unit;
423} 423}
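rv6xx_scale_count_given_unit() is a ceiling division: since rv6xx_clocks_per_unit() returns 1 << (2 * unit), the (x + n - 1) / n idiom rounds a raw clock count up to whole units, and a u32 is wide enough for every reachable unit value, which is all the type change above amounts to. A standalone sketch (function names shortened):

#include <stdint.h>
#include <stdio.h>

static uint32_t clocks_per_unit(uint32_t unit)
{
	return 1u << (2 * unit);          /* 1, 4, 16, ... clocks per unit */
}

/* round unscaled_count up to a whole number of units */
static uint32_t scale_count(uint32_t unscaled_count, uint32_t unit)
{
	uint32_t per = clocks_per_unit(unit);

	return (unscaled_count + per - 1) / per;   /* ceiling division */
}

int main(void)
{
	printf("%u\n", scale_count(100, 2));   /* ceil(100 / 16) == 7 */
	return 0;
}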
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d96f7cbca0a1..6a64ccaa0695 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -78,11 +78,6 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
78extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev); 78extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
79extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); 79extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
80extern bool evergreen_is_display_hung(struct radeon_device *rdev); 80extern bool evergreen_is_display_hung(struct radeon_device *rdev);
81extern void si_dma_vm_set_page(struct radeon_device *rdev,
82 struct radeon_ib *ib,
83 uint64_t pe,
84 uint64_t addr, unsigned count,
85 uint32_t incr, uint32_t flags);
86static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, 81static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
87 bool enable); 82 bool enable);
88static void si_fini_pg(struct radeon_device *rdev); 83static void si_fini_pg(struct radeon_device *rdev);
@@ -4673,61 +4668,6 @@ static void si_vm_decode_fault(struct radeon_device *rdev,
4673 block, mc_id); 4668 block, mc_id);
4674} 4669}
4675 4670
4676/**
4677 * si_vm_set_page - update the page tables using the CP
4678 *
4679 * @rdev: radeon_device pointer
4680 * @ib: indirect buffer to fill with commands
4681 * @pe: addr of the page entry
4682 * @addr: dst addr to write into pe
4683 * @count: number of page entries to update
4684 * @incr: increase next addr by incr bytes
4685 * @flags: access flags
4686 *
4687 * Update the page tables using the CP (SI).
4688 */
4689void si_vm_set_page(struct radeon_device *rdev,
4690 struct radeon_ib *ib,
4691 uint64_t pe,
4692 uint64_t addr, unsigned count,
4693 uint32_t incr, uint32_t flags)
4694{
4695 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
4696 uint64_t value;
4697 unsigned ndw;
4698
4699 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
4700 while (count) {
4701 ndw = 2 + count * 2;
4702 if (ndw > 0x3FFE)
4703 ndw = 0x3FFE;
4704
4705 ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
4706 ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
4707 WRITE_DATA_DST_SEL(1));
4708 ib->ptr[ib->length_dw++] = pe;
4709 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
4710 for (; ndw > 2; ndw -= 2, --count, pe += 8) {
4711 if (flags & RADEON_VM_PAGE_SYSTEM) {
4712 value = radeon_vm_map_gart(rdev, addr);
4713 value &= 0xFFFFFFFFFFFFF000ULL;
4714 } else if (flags & RADEON_VM_PAGE_VALID) {
4715 value = addr;
4716 } else {
4717 value = 0;
4718 }
4719 addr += incr;
4720 value |= r600_flags;
4721 ib->ptr[ib->length_dw++] = value;
4722 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4723 }
4724 }
4725 } else {
4726 /* DMA */
4727 si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
4728 }
4729}
4730
4731void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 4671void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4732{ 4672{
4733 struct radeon_ring *ring = &rdev->ring[ridx]; 4673 struct radeon_ring *ring = &rdev->ring[ridx];
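The deleted CP path shows the PTE packing that the CP and DMA back ends share: pick a page address (translated through the GART for system pages, taken as-is when merely valid, zero otherwise), mask it to the page boundary, then OR the access flags into the low bits. A hedged userspace sketch of that packing; the flag bit values and the GART stand-in are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define PTE_SYSTEM  (1u << 1)   /* illustrative; the real bits live in the driver headers */
#define PTE_VALID   (1u << 0)

static uint64_t fake_map_gart(uint64_t addr) { return addr + 0x100000000ull; }

static uint64_t pack_pte(uint64_t addr, uint32_t flags)
{
	uint64_t value;

	if (flags & PTE_SYSTEM) {
		value = fake_map_gart(addr);        /* translate through the GART */
		value &= 0xFFFFFFFFFFFFF000ull;     /* keep the page-aligned part */
	} else if (flags & PTE_VALID) {
		value = addr;                       /* VRAM page, used as-is */
	} else {
		value = 0;                          /* invalid entry */
	}
	return value | flags;                       /* low bits carry access flags */
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)pack_pte(0x1000, PTE_SYSTEM | PTE_VALID));
	return 0;
}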
@@ -5372,52 +5312,53 @@ void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
5372 if (buffer == NULL) 5312 if (buffer == NULL)
5373 return; 5313 return;
5374 5314
5375 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0); 5315 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
5376 buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE; 5316 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
5377 5317
5378 buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1); 5318 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5379 buffer[count++] = 0x80000000; 5319 buffer[count++] = cpu_to_le32(0x80000000);
5380 buffer[count++] = 0x80000000; 5320 buffer[count++] = cpu_to_le32(0x80000000);
5381 5321
5382 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { 5322 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5383 for (ext = sect->section; ext->extent != NULL; ++ext) { 5323 for (ext = sect->section; ext->extent != NULL; ++ext) {
5384 if (sect->id == SECT_CONTEXT) { 5324 if (sect->id == SECT_CONTEXT) {
5385 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count); 5325 buffer[count++] =
5386 buffer[count++] = ext->reg_index - 0xa000; 5326 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
5327 buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
5387 for (i = 0; i < ext->reg_count; i++) 5328 for (i = 0; i < ext->reg_count; i++)
5388 buffer[count++] = ext->extent[i]; 5329 buffer[count++] = cpu_to_le32(ext->extent[i]);
5389 } else { 5330 } else {
5390 return; 5331 return;
5391 } 5332 }
5392 } 5333 }
5393 } 5334 }
5394 5335
5395 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1); 5336 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
5396 buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START; 5337 buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
5397 switch (rdev->family) { 5338 switch (rdev->family) {
5398 case CHIP_TAHITI: 5339 case CHIP_TAHITI:
5399 case CHIP_PITCAIRN: 5340 case CHIP_PITCAIRN:
5400 buffer[count++] = 0x2a00126a; 5341 buffer[count++] = cpu_to_le32(0x2a00126a);
5401 break; 5342 break;
5402 case CHIP_VERDE: 5343 case CHIP_VERDE:
5403 buffer[count++] = 0x0000124a; 5344 buffer[count++] = cpu_to_le32(0x0000124a);
5404 break; 5345 break;
5405 case CHIP_OLAND: 5346 case CHIP_OLAND:
5406 buffer[count++] = 0x00000082; 5347 buffer[count++] = cpu_to_le32(0x00000082);
5407 break; 5348 break;
5408 case CHIP_HAINAN: 5349 case CHIP_HAINAN:
5409 buffer[count++] = 0x00000000; 5350 buffer[count++] = cpu_to_le32(0x00000000);
5410 break; 5351 break;
5411 default: 5352 default:
5412 buffer[count++] = 0x00000000; 5353 buffer[count++] = cpu_to_le32(0x00000000);
5413 break; 5354 break;
5414 } 5355 }
5415 5356
5416 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0); 5357 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
5417 buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE; 5358 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
5418 5359
5419 buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0); 5360 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
5420 buffer[count++] = 0; 5361 buffer[count++] = cpu_to_le32(0);
5421} 5362}
5422 5363
5423static void si_init_pg(struct radeon_device *rdev) 5364static void si_init_pg(struct radeon_device *rdev)
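Every dword written to the clear-state buffer now goes through cpu_to_le32() because the GPU consumes the buffer in little-endian byte order regardless of host endianness; on little-endian machines the conversion compiles to nothing. A minimal standalone model of the same idea, with to_le32() standing in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* stand-in for cpu_to_le32(): byte-swap only on big-endian hosts */
static uint32_t to_le32(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;
#endif
}

int main(void)
{
	uint32_t buffer[2];
	unsigned int count = 0;

	/* the device-visible command stream must be little-endian */
	buffer[count++] = to_le32(0x80000000u);
	buffer[count++] = to_le32(0x2a00126au);

	printf("%08x %08x\n", (unsigned)buffer[0], (unsigned)buffer[1]);
	return 0;
}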
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 49909d23dfce..8e8f46133532 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -24,6 +24,7 @@
24#include <drm/drmP.h> 24#include <drm/drmP.h>
25#include "radeon.h" 25#include "radeon.h"
26#include "radeon_asic.h" 26#include "radeon_asic.h"
27#include "radeon_trace.h"
27#include "sid.h" 28#include "sid.h"
28 29
29u32 si_gpu_check_soft_reset(struct radeon_device *rdev); 30u32 si_gpu_check_soft_reset(struct radeon_device *rdev);
@@ -75,11 +76,12 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
75 uint64_t addr, unsigned count, 76 uint64_t addr, unsigned count,
76 uint32_t incr, uint32_t flags) 77 uint32_t incr, uint32_t flags)
77{ 78{
78 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
79 uint64_t value; 79 uint64_t value;
80 unsigned ndw; 80 unsigned ndw;
81 81
82 if (flags & RADEON_VM_PAGE_SYSTEM) { 82 trace_radeon_vm_set_page(pe, addr, count, incr, flags);
83
84 if (flags & R600_PTE_SYSTEM) {
83 while (count) { 85 while (count) {
84 ndw = count * 2; 86 ndw = count * 2;
85 if (ndw > 0xFFFFE) 87 if (ndw > 0xFFFFE)
@@ -90,16 +92,10 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
90 ib->ptr[ib->length_dw++] = pe; 92 ib->ptr[ib->length_dw++] = pe;
91 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 93 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
92 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 94 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
93 if (flags & RADEON_VM_PAGE_SYSTEM) { 95 value = radeon_vm_map_gart(rdev, addr);
94 value = radeon_vm_map_gart(rdev, addr); 96 value &= 0xFFFFFFFFFFFFF000ULL;
95 value &= 0xFFFFFFFFFFFFF000ULL;
96 } else if (flags & RADEON_VM_PAGE_VALID) {
97 value = addr;
98 } else {
99 value = 0;
100 }
101 addr += incr; 97 addr += incr;
102 value |= r600_flags; 98 value |= flags;
103 ib->ptr[ib->length_dw++] = value; 99 ib->ptr[ib->length_dw++] = value;
104 ib->ptr[ib->length_dw++] = upper_32_bits(value); 100 ib->ptr[ib->length_dw++] = upper_32_bits(value);
105 } 101 }
@@ -110,7 +106,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
110 if (ndw > 0xFFFFE) 106 if (ndw > 0xFFFFE)
111 ndw = 0xFFFFE; 107 ndw = 0xFFFFE;
112 108
113 if (flags & RADEON_VM_PAGE_VALID) 109 if (flags & R600_PTE_VALID)
114 value = addr; 110 value = addr;
115 else 111 else
116 value = 0; 112 value = 0;
@@ -118,7 +114,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
118 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); 114 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
119 ib->ptr[ib->length_dw++] = pe; /* dst addr */ 115 ib->ptr[ib->length_dw++] = pe; /* dst addr */
120 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 116 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
121 ib->ptr[ib->length_dw++] = r600_flags; /* mask */ 117 ib->ptr[ib->length_dw++] = flags; /* mask */
122 ib->ptr[ib->length_dw++] = 0; 118 ib->ptr[ib->length_dw++] = 0;
123 ib->ptr[ib->length_dw++] = value; /* value */ 119 ib->ptr[ib->length_dw++] = value; /* value */
124 ib->ptr[ib->length_dw++] = upper_32_bits(value); 120 ib->ptr[ib->length_dw++] = upper_32_bits(value);
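With this hunk si_dma_vm_set_page() no longer calls cayman_vm_page_flags() itself: the caller hands in hardware-ready R600_PTE_* bits and the function just ORs them into each entry. A toy sketch of the before/after split; the flag names and bit values here are invented:

#include <stdint.h>
#include <stdio.h>

#define API_PAGE_VALID  (1u << 0)   /* driver-internal flag */
#define HW_PTE_VALID    (1u << 0)   /* hardware PTE bits */
#define HW_PTE_SNOOPED  (1u << 2)

/* old style: every set_page call translated API flags to hardware bits */
static uint32_t translate(uint32_t api_flags)
{
	uint32_t hw = 0;

	if (api_flags & API_PAGE_VALID)
		hw |= HW_PTE_VALID | HW_PTE_SNOOPED;
	return hw;
}

int main(void)
{
	/* new style: translate once in the caller, then just OR the bits in */
	uint32_t hw_flags = translate(API_PAGE_VALID);
	uint64_t pte = 0x1000ull | hw_flags;

	printf("%#llx\n", (unsigned long long)pte);
	return 0;
}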
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 2332aa1bf93c..0b00c790fb77 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3589,7 +3589,12 @@ static void si_program_display_gap(struct radeon_device *rdev)
3589 WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp); 3589 WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
3590 } 3590 }
3591 3591
3592 si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0); 3592 /* Setting this to false forces the performance state to low when all crtcs are disabled.
3593 * That is a problem on PowerXpress systems, and also when the card is used
3594 * for offscreen rendering or compute with no crtcs enabled. Set it to true
3595 * for now so that performance scales even while the displays are off.
3596 */
3597 si_notify_smc_display_change(rdev, true /*rdev->pm.dpm.new_active_crtc_count > 0*/);
3593} 3598}
3594 3599
3595static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable) 3600static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
@@ -4553,7 +4558,7 @@ static int si_init_smc_table(struct radeon_device *rdev)
4553 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; 4558 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
4554 4559
4555 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY) 4560 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
4556 table->systemFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH; 4561 table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
4557 4562
4558 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) { 4563 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
4559 table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO; 4564 table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7e2e0ea66a00..b322acc48097 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -478,7 +478,7 @@
478#define STATE3_MASK (0x1f << 15) 478#define STATE3_MASK (0x1f << 15)
479#define STATE3_SHIFT 15 479#define STATE3_SHIFT 15
480 480
481#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x2808 481#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
482#define TRAIN_DONE_D0 (1 << 30) 482#define TRAIN_DONE_D0 (1 << 30)
483#define TRAIN_DONE_D1 (1 << 31) 483#define TRAIN_DONE_D1 (1 << 31)
484 484
@@ -683,6 +683,51 @@
683 * bit5 = 176.4 kHz 683 * bit5 = 176.4 kHz
684 * bit6 = 192 kHz 684 * bit6 = 192 kHz
685 */ 685 */
686
687#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
688# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
689# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
690/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
691 * 0 = invalid
692 * x = legal delay value
693 * 255 = sync not supported
694 */
695#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
696# define HBR_CAPABLE (1 << 0) /* enabled by default */
697
698#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
699# define MANUFACTURER_ID(x) (((x) & 0xffff) << 0)
700# define PRODUCT_ID(x) (((x) & 0xffff) << 16)
701#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
702# define SINK_DESCRIPTION_LEN(x) (((x) & 0xff) << 0)
703#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
704# define PORT_ID0(x) (((x) & 0xffffffff) << 0)
705#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
706# define PORT_ID1(x) (((x) & 0xffffffff) << 0)
707#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
708# define DESCRIPTION0(x) (((x) & 0xff) << 0)
709# define DESCRIPTION1(x) (((x) & 0xff) << 8)
710# define DESCRIPTION2(x) (((x) & 0xff) << 16)
711# define DESCRIPTION3(x) (((x) & 0xff) << 24)
712#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
713# define DESCRIPTION4(x) (((x) & 0xff) << 0)
714# define DESCRIPTION5(x) (((x) & 0xff) << 8)
715# define DESCRIPTION6(x) (((x) & 0xff) << 16)
716# define DESCRIPTION7(x) (((x) & 0xff) << 24)
717#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
718# define DESCRIPTION8(x) (((x) & 0xff) << 0)
719# define DESCRIPTION9(x) (((x) & 0xff) << 8)
720# define DESCRIPTION10(x) (((x) & 0xff) << 16)
721# define DESCRIPTION11(x) (((x) & 0xff) << 24)
722#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
723# define DESCRIPTION12(x) (((x) & 0xff) << 0)
724# define DESCRIPTION13(x) (((x) & 0xff) << 8)
725# define DESCRIPTION14(x) (((x) & 0xff) << 16)
726# define DESCRIPTION15(x) (((x) & 0xff) << 24)
727#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
728# define DESCRIPTION16(x) (((x) & 0xff) << 0)
729# define DESCRIPTION17(x) (((x) & 0xff) << 8)
730
686#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54 731#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54
687# define AUDIO_ENABLED (1 << 31) 732# define AUDIO_ENABLED (1 << 31)
688 733
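The new SINK_INFO macros follow the usual shift-and-mask pattern for composing register words. A quick standalone check of how two 16-bit identifiers pack into AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 (the IDs are made up):

#include <stdint.h>
#include <stdio.h>

#define MANUFACTURER_ID(x)  (((x) & 0xffff) << 0)
#define PRODUCT_ID(x)       (((x) & 0xffff) << 16)

int main(void)
{
	/* hypothetical sink: manufacturer 0x414e, product 0x0042 */
	uint32_t info0 = MANUFACTURER_ID(0x414e) | PRODUCT_ID(0x0042);

	printf("SINK_INFO0 = %#010x\n", (unsigned)info0);   /* 0x0042414e */
	return 0;
}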
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index c590cd9dca0b..d8e835ac2c5e 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -4,6 +4,7 @@ config DRM_RCAR_DU
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_CMA_HELPER 5 select DRM_KMS_CMA_HELPER
6 select DRM_GEM_CMA_HELPER 6 select DRM_GEM_CMA_HELPER
7 select DRM_KMS_FB_HELPER
7 help 8 help
8 Choose this option if you have an R-Car chipset. 9 Choose this option if you have an R-Car chipset.
9 If M is selected the module will be called rcar-du-drm. 10 If M is selected the module will be called rcar-du-drm.
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index ca498d151a76..2ee44ca9d67f 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -1,7 +1,9 @@
1config DRM_SHMOBILE 1config DRM_SHMOBILE
2 tristate "DRM Support for SH Mobile" 2 tristate "DRM Support for SH Mobile"
3 depends on DRM && (ARM || SUPERH) 3 depends on DRM && (ARM || SUPERH)
4 select BACKLIGHT_CLASS_DEVICE
4 select DRM_KMS_HELPER 5 select DRM_KMS_HELPER
6 select DRM_KMS_FB_HELPER
5 select DRM_KMS_CMA_HELPER 7 select DRM_KMS_CMA_HELPER
6 select DRM_GEM_CMA_HELPER 8 select DRM_GEM_CMA_HELPER
7 help 9 help
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 54bad98e9477..562f9a401cf6 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -40,7 +40,7 @@
40static void shmob_drm_clk_on(struct shmob_drm_device *sdev) 40static void shmob_drm_clk_on(struct shmob_drm_device *sdev)
41{ 41{
42 if (sdev->clock) 42 if (sdev->clock)
43 clk_enable(sdev->clock); 43 clk_prepare_enable(sdev->clock);
44#if 0 44#if 0
45 if (sdev->meram_dev && sdev->meram_dev->pdev) 45 if (sdev->meram_dev && sdev->meram_dev->pdev)
46 pm_runtime_get_sync(&sdev->meram_dev->pdev->dev); 46 pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
@@ -54,7 +54,7 @@ static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
54 pm_runtime_put_sync(&sdev->meram_dev->pdev->dev); 54 pm_runtime_put_sync(&sdev->meram_dev->pdev->dev);
55#endif 55#endif
56 if (sdev->clock) 56 if (sdev->clock)
57 clk_disable(sdev->clock); 57 clk_disable_unprepare(sdev->clock);
58} 58}
59 59
60/* ----------------------------------------------------------------------------- 60/* -----------------------------------------------------------------------------
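clk_enable() is only legal on a clock that has already been prepared, and the prepare step may sleep, so these hunks switch to the combined clk_prepare_enable()/clk_disable_unprepare() helpers. A toy userspace model of that API contract (the struct and asserts are illustrative, not the real clk framework):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* toy model of the clk contract: enable is only legal on a prepared clock */
struct clk { bool prepared; bool enabled; };

static void clk_prepare(struct clk *c)           { c->prepared = true; }
static void clk_enable(struct clk *c)            { assert(c->prepared); c->enabled = true; }
static void clk_prepare_enable(struct clk *c)    { clk_prepare(c); clk_enable(c); }
static void clk_disable(struct clk *c)           { c->enabled = false; }
static void clk_unprepare(struct clk *c)         { assert(!c->enabled); c->prepared = false; }
static void clk_disable_unprepare(struct clk *c) { clk_disable(c); clk_unprepare(c); }

int main(void)
{
	struct clk pixel = { 0 };

	clk_prepare_enable(&pixel);      /* what the hunk switches to */
	clk_disable_unprepare(&pixel);
	printf("ok\n");
	return 0;
}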
diff --git a/drivers/gpu/host1x/drm/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 69853a4de40a..8961ba6a34b8 100644
--- a/drivers/gpu/host1x/drm/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -1,7 +1,10 @@
1config DRM_TEGRA 1config DRM_TEGRA
2 bool "NVIDIA Tegra DRM" 2 bool "NVIDIA Tegra DRM"
3 depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
3 depends on DRM 4 depends on DRM
5 select TEGRA_HOST1X
4 select DRM_KMS_HELPER 6 select DRM_KMS_HELPER
7 select DRM_KMS_FB_HELPER
5 select FB_SYS_FILLRECT 8 select FB_SYS_FILLRECT
6 select FB_SYS_COPYAREA 9 select FB_SYS_COPYAREA
7 select FB_SYS_IMAGEBLIT 10 select FB_SYS_IMAGEBLIT
@@ -13,6 +16,11 @@ config DRM_TEGRA
13 16
14if DRM_TEGRA 17if DRM_TEGRA
15 18
19config DRM_TEGRA_DEBUG
20 bool "NVIDIA Tegra DRM debug support"
21 help
22 Say yes here to enable debugging support.
23
16config DRM_TEGRA_STAGING 24config DRM_TEGRA_STAGING
17 bool "Enable HOST1X interface" 25 bool "Enable HOST1X interface"
18 depends on STAGING 26 depends on STAGING
@@ -21,9 +29,4 @@ config DRM_TEGRA_STAGING
21 29
22 If unsure, choose N. 30 If unsure, choose N.
23 31
24config DRM_TEGRA_DEBUG
25 bool "NVIDIA Tegra DRM debug support"
26 help
27 Say yes here to enable debugging support.
28
29endif 32endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644
index 000000000000..edc76abd58bb
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -0,0 +1,15 @@
1ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
2
3tegra-drm-y := \
4 bus.o \
5 drm.o \
6 gem.o \
7 fb.o \
8 dc.o \
9 output.o \
10 rgb.o \
11 hdmi.o \
12 gr2d.o \
13 gr3d.o
14
15obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c
new file mode 100644
index 000000000000..565f8f7b9a47
--- /dev/null
+++ b/drivers/gpu/drm/tegra/bus.c
@@ -0,0 +1,76 @@
1/*
2 * Copyright (C) 2013 NVIDIA Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include "drm.h"
10
11static int drm_host1x_set_busid(struct drm_device *dev,
12 struct drm_master *master)
13{
14 const char *device = dev_name(dev->dev);
15 const char *driver = dev->driver->name;
16 const char *bus = dev->dev->bus->name;
17 int length;
18
19 master->unique_len = strlen(bus) + 1 + strlen(device);
20 master->unique_size = master->unique_len;
21
22 master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
23 if (!master->unique)
24 return -ENOMEM;
25
26 snprintf(master->unique, master->unique_len + 1, "%s:%s", bus, device);
27
28 length = strlen(driver) + 1 + master->unique_len;
29
30 dev->devname = kmalloc(length + 1, GFP_KERNEL);
31 if (!dev->devname)
32 return -ENOMEM;
33
34 snprintf(dev->devname, length + 1, "%s@%s", driver, master->unique);
35
36 return 0;
37}
38
39static struct drm_bus drm_host1x_bus = {
40 .bus_type = DRIVER_BUS_HOST1X,
41 .set_busid = drm_host1x_set_busid,
42};
43
44int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device)
45{
46 struct drm_device *drm;
47 int ret;
48
49 INIT_LIST_HEAD(&driver->device_list);
50 driver->bus = &drm_host1x_bus;
51
52 drm = drm_dev_alloc(driver, &device->dev);
53 if (!drm)
54 return -ENOMEM;
55
56 ret = drm_dev_register(drm, 0);
57 if (ret)
58 goto err_free;
59
60 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
61 driver->major, driver->minor, driver->patchlevel,
62 driver->date, drm->primary->index);
63
64 return 0;
65
66err_free:
67 drm_dev_free(drm);
68 return ret;
69}
70
71void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device)
72{
73 struct tegra_drm *tegra = dev_get_drvdata(&device->dev);
74
75 drm_put_dev(tegra->drm);
76}
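drm_host1x_set_busid() above sizes the unique name as strlen(bus) + 1 + strlen(device): the +1 is the ':' separator, and the NUL terminator is allocated on top of unique_len rather than counted in it. A standalone sketch of the same arithmetic:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *bus = "host1x";
	const char *device = "drm";
	/* "<bus>:<device>": separator counts, terminator is allocated on top */
	size_t unique_len = strlen(bus) + 1 + strlen(device);
	char *unique = malloc(unique_len + 1);

	if (!unique)
		return 1;

	snprintf(unique, unique_len + 1, "%s:%s", bus, device);
	printf("%s (len %zu)\n", unique, unique_len);
	free(unique);
	return 0;
}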
diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/drm/tegra/dc.c
index b1a05ad901c3..ae1cb31ead7e 100644
--- a/drivers/gpu/host1x/drm/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -8,13 +8,9 @@
8 */ 8 */
9 9
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/debugfs.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15#include <linux/clk/tegra.h> 11#include <linux/clk/tegra.h>
12#include <linux/debugfs.h>
16 13
17#include "host1x_client.h"
18#include "dc.h" 14#include "dc.h"
19#include "drm.h" 15#include "drm.h"
20#include "gem.h" 16#include "gem.h"
@@ -51,6 +47,8 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
51 window.dst.h = crtc_h; 47 window.dst.h = crtc_h;
52 window.format = tegra_dc_format(fb->pixel_format); 48 window.format = tegra_dc_format(fb->pixel_format);
53 window.bits_per_pixel = fb->bits_per_pixel; 49 window.bits_per_pixel = fb->bits_per_pixel;
50 window.bottom_up = tegra_fb_is_bottom_up(fb);
51 window.tiled = tegra_fb_is_tiled(fb);
54 52
55 for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { 53 for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
56 struct tegra_bo *bo = tegra_fb_get_plane(fb, i); 54 struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
@@ -97,8 +95,11 @@ static int tegra_plane_disable(struct drm_plane *plane)
97 95
98static void tegra_plane_destroy(struct drm_plane *plane) 96static void tegra_plane_destroy(struct drm_plane *plane)
99{ 97{
98 struct tegra_plane *p = to_tegra_plane(plane);
99
100 tegra_plane_disable(plane); 100 tegra_plane_disable(plane);
101 drm_plane_cleanup(plane); 101 drm_plane_cleanup(plane);
102 kfree(p);
102} 103}
103 104
104static const struct drm_plane_funcs tegra_plane_funcs = { 105static const struct drm_plane_funcs tegra_plane_funcs = {
@@ -124,7 +125,7 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
124 for (i = 0; i < 2; i++) { 125 for (i = 0; i < 2; i++) {
125 struct tegra_plane *plane; 126 struct tegra_plane *plane;
126 127
127 plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL); 128 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
128 if (!plane) 129 if (!plane)
129 return -ENOMEM; 130 return -ENOMEM;
130 131
@@ -133,8 +134,10 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
133 err = drm_plane_init(drm, &plane->base, 1 << dc->pipe, 134 err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
134 &tegra_plane_funcs, plane_formats, 135 &tegra_plane_funcs, plane_formats,
135 ARRAY_SIZE(plane_formats), false); 136 ARRAY_SIZE(plane_formats), false);
136 if (err < 0) 137 if (err < 0) {
138 kfree(plane);
137 return err; 139 return err;
140 }
138 } 141 }
139 142
140 return 0; 143 return 0;
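Switching from devm_kzalloc() to kzalloc() moves the plane out of the device-managed (probe-lifetime) pool: the allocation must now be freed explicitly, both on the drm_plane_init() error path above and in tegra_plane_destroy(). A toy model of that ownership rule:

#include <stdio.h>
#include <stdlib.h>

/* toy version: the allocator frees the object itself if registration fails;
 * otherwise the destroy callback frees it much later, past the probe lifetime */
struct plane { int pipe; };

static int plane_init(struct plane *p) { (void)p; return -1; /* pretend failure */ }

static struct plane *add_plane(int pipe)
{
	struct plane *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->pipe = pipe;
	if (plane_init(p) < 0) {
		free(p);              /* error path: undo our own allocation */
		return NULL;
	}
	return p;                     /* success: freed in the destroy callback */
}

int main(void)
{
	printf("%p\n", (void *)add_plane(0));
	return 0;
}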
@@ -145,6 +148,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
145{ 148{
146 unsigned int format = tegra_dc_format(fb->pixel_format); 149 unsigned int format = tegra_dc_format(fb->pixel_format);
147 struct tegra_bo *bo = tegra_fb_get_plane(fb, 0); 150 struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
151 unsigned int h_offset = 0, v_offset = 0;
148 unsigned long value; 152 unsigned long value;
149 153
150 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER); 154 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -156,6 +160,32 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
156 tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE); 160 tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
157 tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH); 161 tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
158 162
163 if (tegra_fb_is_tiled(fb)) {
164 value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
165 DC_WIN_BUFFER_ADDR_MODE_TILE;
166 } else {
167 value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
168 DC_WIN_BUFFER_ADDR_MODE_LINEAR;
169 }
170
171 tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
172
173 /* make sure bottom-up buffers are properly displayed */
174 if (tegra_fb_is_bottom_up(fb)) {
175 value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
176 value |= INVERT_V;
177 tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
178
179 v_offset += fb->height - 1;
180 } else {
181 value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
182 value &= ~INVERT_V;
183 tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
184 }
185
186 tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
187 tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
188
159 value = GENERAL_UPDATE | WIN_A_UPDATE; 189 value = GENERAL_UPDATE | WIN_A_UPDATE;
160 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); 190 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
161 191
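For a bottom-up framebuffer the display controller has to start scanning at the last line and walk backwards through memory, which is what the INVERT_V bit plus a vertical offset of height - 1 achieve above. A pure-arithmetic sketch of where the first scanned pixel ends up (register programming omitted; this only models the offset math):

#include <stdbool.h>
#include <stdio.h>

/* compute the byte address of the first scanned pixel */
static unsigned long scan_start(unsigned long base, unsigned int pitch,
				unsigned int height, bool bottom_up)
{
	unsigned int v_offset = 0;

	if (bottom_up)
		v_offset += height - 1;   /* start at the last line, INVERT_V walks up */

	return base + (unsigned long)v_offset * pitch;
}

int main(void)
{
	printf("%#lx\n", scan_start(0x1000, 4096, 768, true));
	return 0;
}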
@@ -255,14 +285,26 @@ static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
255 return 0; 285 return 0;
256} 286}
257 287
288static void drm_crtc_clear(struct drm_crtc *crtc)
289{
290 memset(crtc, 0, sizeof(*crtc));
291}
292
293static void tegra_dc_destroy(struct drm_crtc *crtc)
294{
295 drm_crtc_cleanup(crtc);
296 drm_crtc_clear(crtc);
297}
298
258static const struct drm_crtc_funcs tegra_crtc_funcs = { 299static const struct drm_crtc_funcs tegra_crtc_funcs = {
259 .page_flip = tegra_dc_page_flip, 300 .page_flip = tegra_dc_page_flip,
260 .set_config = drm_crtc_helper_set_config, 301 .set_config = drm_crtc_helper_set_config,
261 .destroy = drm_crtc_cleanup, 302 .destroy = tegra_dc_destroy,
262}; 303};
263 304
264static void tegra_crtc_disable(struct drm_crtc *crtc) 305static void tegra_crtc_disable(struct drm_crtc *crtc)
265{ 306{
307 struct tegra_dc *dc = to_tegra_dc(crtc);
266 struct drm_device *drm = crtc->dev; 308 struct drm_device *drm = crtc->dev;
267 struct drm_plane *plane; 309 struct drm_plane *plane;
268 310
@@ -277,6 +319,8 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
277 } 319 }
278 } 320 }
279 } 321 }
322
323 drm_vblank_off(drm, dc->pipe);
280} 324}
281 325
282static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc, 326static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -491,9 +535,22 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
491 tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE); 535 tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
492 } 536 }
493 537
538 if (window->bottom_up)
539 v_offset += window->src.h - 1;
540
494 tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET); 541 tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
495 tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET); 542 tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
496 543
544 if (window->tiled) {
545 value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
546 DC_WIN_BUFFER_ADDR_MODE_TILE;
547 } else {
548 value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
549 DC_WIN_BUFFER_ADDR_MODE_LINEAR;
550 }
551
552 tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
553
497 value = WIN_ENABLE; 554 value = WIN_ENABLE;
498 555
499 if (yuv) { 556 if (yuv) {
@@ -512,6 +569,9 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
512 value |= COLOR_EXPAND; 569 value |= COLOR_EXPAND;
513 } 570 }
514 571
572 if (window->bottom_up)
573 value |= INVERT_V;
574
515 tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); 575 tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
516 576
517 /* 577 /*
@@ -1041,30 +1101,30 @@ static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
1041 return 0; 1101 return 0;
1042} 1102}
1043 1103
1044static int tegra_dc_drm_init(struct host1x_client *client, 1104static int tegra_dc_init(struct host1x_client *client)
1045 struct drm_device *drm)
1046{ 1105{
1106 struct tegra_drm *tegra = dev_get_drvdata(client->parent);
1047 struct tegra_dc *dc = host1x_client_to_dc(client); 1107 struct tegra_dc *dc = host1x_client_to_dc(client);
1048 int err; 1108 int err;
1049 1109
1050 dc->pipe = drm->mode_config.num_crtc; 1110 dc->pipe = tegra->drm->mode_config.num_crtc;
1051 1111
1052 drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs); 1112 drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs);
1053 drm_mode_crtc_set_gamma_size(&dc->base, 256); 1113 drm_mode_crtc_set_gamma_size(&dc->base, 256);
1054 drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs); 1114 drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
1055 1115
1056 err = tegra_dc_rgb_init(drm, dc); 1116 err = tegra_dc_rgb_init(tegra->drm, dc);
1057 if (err < 0 && err != -ENODEV) { 1117 if (err < 0 && err != -ENODEV) {
1058 dev_err(dc->dev, "failed to initialize RGB output: %d\n", err); 1118 dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
1059 return err; 1119 return err;
1060 } 1120 }
1061 1121
1062 err = tegra_dc_add_planes(drm, dc); 1122 err = tegra_dc_add_planes(tegra->drm, dc);
1063 if (err < 0) 1123 if (err < 0)
1064 return err; 1124 return err;
1065 1125
1066 if (IS_ENABLED(CONFIG_DEBUG_FS)) { 1126 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1067 err = tegra_dc_debugfs_init(dc, drm->primary); 1127 err = tegra_dc_debugfs_init(dc, tegra->drm->primary);
1068 if (err < 0) 1128 if (err < 0)
1069 dev_err(dc->dev, "debugfs setup failed: %d\n", err); 1129 dev_err(dc->dev, "debugfs setup failed: %d\n", err);
1070 } 1130 }
@@ -1080,7 +1140,7 @@ static int tegra_dc_drm_init(struct host1x_client *client,
1080 return 0; 1140 return 0;
1081} 1141}
1082 1142
1083static int tegra_dc_drm_exit(struct host1x_client *client) 1143static int tegra_dc_exit(struct host1x_client *client)
1084{ 1144{
1085 struct tegra_dc *dc = host1x_client_to_dc(client); 1145 struct tegra_dc *dc = host1x_client_to_dc(client);
1086 int err; 1146 int err;
@@ -1103,13 +1163,12 @@ static int tegra_dc_drm_exit(struct host1x_client *client)
1103} 1163}
1104 1164
1105static const struct host1x_client_ops dc_client_ops = { 1165static const struct host1x_client_ops dc_client_ops = {
1106 .drm_init = tegra_dc_drm_init, 1166 .init = tegra_dc_init,
1107 .drm_exit = tegra_dc_drm_exit, 1167 .exit = tegra_dc_exit,
1108}; 1168};
1109 1169
1110static int tegra_dc_probe(struct platform_device *pdev) 1170static int tegra_dc_probe(struct platform_device *pdev)
1111{ 1171{
1112 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
1113 struct resource *regs; 1172 struct resource *regs;
1114 struct tegra_dc *dc; 1173 struct tegra_dc *dc;
1115 int err; 1174 int err;
@@ -1153,7 +1212,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
1153 return err; 1212 return err;
1154 } 1213 }
1155 1214
1156 err = host1x_register_client(host1x, &dc->client); 1215 err = host1x_client_register(&dc->client);
1157 if (err < 0) { 1216 if (err < 0) {
1158 dev_err(&pdev->dev, "failed to register host1x client: %d\n", 1217 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1159 err); 1218 err);
@@ -1167,17 +1226,22 @@ static int tegra_dc_probe(struct platform_device *pdev)
1167 1226
1168static int tegra_dc_remove(struct platform_device *pdev) 1227static int tegra_dc_remove(struct platform_device *pdev)
1169{ 1228{
1170 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
1171 struct tegra_dc *dc = platform_get_drvdata(pdev); 1229 struct tegra_dc *dc = platform_get_drvdata(pdev);
1172 int err; 1230 int err;
1173 1231
1174 err = host1x_unregister_client(host1x, &dc->client); 1232 err = host1x_client_unregister(&dc->client);
1175 if (err < 0) { 1233 if (err < 0) {
1176 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", 1234 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
1177 err); 1235 err);
1178 return err; 1236 return err;
1179 } 1237 }
1180 1238
1239 err = tegra_dc_rgb_remove(dc);
1240 if (err < 0) {
1241 dev_err(&pdev->dev, "failed to remove RGB output: %d\n", err);
1242 return err;
1243 }
1244
1181 clk_disable_unprepare(dc->clk); 1245 clk_disable_unprepare(dc->clk);
1182 1246
1183 return 0; 1247 return 0;
diff --git a/drivers/gpu/host1x/drm/dc.h b/drivers/gpu/drm/tegra/dc.h
index 79eaec9aac77..91bbda291470 100644
--- a/drivers/gpu/host1x/drm/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -302,6 +302,7 @@
302#define DC_WIN_CSC_KVB 0x618 302#define DC_WIN_CSC_KVB 0x618
303 303
304#define DC_WIN_WIN_OPTIONS 0x700 304#define DC_WIN_WIN_OPTIONS 0x700
305#define INVERT_V (1 << 2)
305#define COLOR_EXPAND (1 << 6) 306#define COLOR_EXPAND (1 << 6)
306#define CSC_ENABLE (1 << 18) 307#define CSC_ENABLE (1 << 18)
307#define WIN_ENABLE (1 << 30) 308#define WIN_ENABLE (1 << 30)
@@ -365,6 +366,10 @@
365#define DC_WIN_BUF_STRIDE 0x70b 366#define DC_WIN_BUF_STRIDE 0x70b
366#define DC_WIN_UV_BUF_STRIDE 0x70c 367#define DC_WIN_UV_BUF_STRIDE 0x70c
367#define DC_WIN_BUFFER_ADDR_MODE 0x70d 368#define DC_WIN_BUFFER_ADDR_MODE 0x70d
369#define DC_WIN_BUFFER_ADDR_MODE_LINEAR (0 << 0)
370#define DC_WIN_BUFFER_ADDR_MODE_TILE (1 << 0)
371#define DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV (0 << 16)
372#define DC_WIN_BUFFER_ADDR_MODE_TILE_UV (1 << 16)
368#define DC_WIN_DV_CONTROL 0x70e 373#define DC_WIN_DV_CONTROL 0x70e
369 374
370#define DC_WIN_BLEND_NOKEY 0x70f 375#define DC_WIN_BLEND_NOKEY 0x70f
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
new file mode 100644
index 000000000000..28e178137718
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -0,0 +1,714 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/host1x.h>
11
12#include "drm.h"
13#include "gem.h"
14
15#define DRIVER_NAME "tegra"
16#define DRIVER_DESC "NVIDIA Tegra graphics"
17#define DRIVER_DATE "20120330"
18#define DRIVER_MAJOR 0
19#define DRIVER_MINOR 0
20#define DRIVER_PATCHLEVEL 0
21
22struct tegra_drm_file {
23 struct list_head contexts;
24};
25
26static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
27{
28 struct host1x_device *device = to_host1x_device(drm->dev);
29 struct tegra_drm *tegra;
30 int err;
31
32 tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
33 if (!tegra)
34 return -ENOMEM;
35
36 dev_set_drvdata(drm->dev, tegra);
37 mutex_init(&tegra->clients_lock);
38 INIT_LIST_HEAD(&tegra->clients);
39 drm->dev_private = tegra;
40 tegra->drm = drm;
41
42 drm_mode_config_init(drm);
43
44 err = host1x_device_init(device);
45 if (err < 0)
46 return err;
47
48 /*
49 * We don't use the drm_irq_install() helpers provided by the DRM
50 * core, so we need to set this flag manually in order for
51 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
52 */
53 drm->irq_enabled = true;
54
55 err = drm_vblank_init(drm, drm->mode_config.num_crtc);
56 if (err < 0)
57 return err;
58
59 err = tegra_drm_fb_init(drm);
60 if (err < 0)
61 return err;
62
63 drm_kms_helper_poll_init(drm);
64
65 return 0;
66}
67
68static int tegra_drm_unload(struct drm_device *drm)
69{
70 struct host1x_device *device = to_host1x_device(drm->dev);
71 int err;
72
73 drm_kms_helper_poll_fini(drm);
74 tegra_drm_fb_exit(drm);
75 drm_vblank_cleanup(drm);
76 drm_mode_config_cleanup(drm);
77
78 err = host1x_device_exit(device);
79 if (err < 0)
80 return err;
81
82 return 0;
83}
84
85static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
86{
87 struct tegra_drm_file *fpriv;
88
89 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
90 if (!fpriv)
91 return -ENOMEM;
92
93 INIT_LIST_HEAD(&fpriv->contexts);
94 filp->driver_priv = fpriv;
95
96 return 0;
97}
98
99static void tegra_drm_context_free(struct tegra_drm_context *context)
100{
101 context->client->ops->close_channel(context);
102 kfree(context);
103}
104
105static void tegra_drm_lastclose(struct drm_device *drm)
106{
107 struct tegra_drm *tegra = drm->dev_private;
108
109 tegra_fbdev_restore_mode(tegra->fbdev);
110}
111
112static struct host1x_bo *
113host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
114{
115 struct drm_gem_object *gem;
116 struct tegra_bo *bo;
117
118 gem = drm_gem_object_lookup(drm, file, handle);
119 if (!gem)
120 return NULL;
121
122 mutex_lock(&drm->struct_mutex);
123 drm_gem_object_unreference(gem);
124 mutex_unlock(&drm->struct_mutex);
125
126 bo = to_tegra_bo(gem);
127 return &bo->base;
128}
129
130int tegra_drm_submit(struct tegra_drm_context *context,
131 struct drm_tegra_submit *args, struct drm_device *drm,
132 struct drm_file *file)
133{
134 unsigned int num_cmdbufs = args->num_cmdbufs;
135 unsigned int num_relocs = args->num_relocs;
136 unsigned int num_waitchks = args->num_waitchks;
137 struct drm_tegra_cmdbuf __user *cmdbufs =
138 (void * __user)(uintptr_t)args->cmdbufs;
139 struct drm_tegra_reloc __user *relocs =
140 (void * __user)(uintptr_t)args->relocs;
141 struct drm_tegra_waitchk __user *waitchks =
142 (void * __user)(uintptr_t)args->waitchks;
143 struct drm_tegra_syncpt syncpt;
144 struct host1x_job *job;
145 int err;
146
147 /* For now we only support exactly one syncpt_incr struct per submit */
148 if (args->num_syncpts != 1)
149 return -EINVAL;
150
151 job = host1x_job_alloc(context->channel, args->num_cmdbufs,
152 args->num_relocs, args->num_waitchks);
153 if (!job)
154 return -ENOMEM;
155
156 job->num_relocs = args->num_relocs;
157 job->num_waitchk = args->num_waitchks;
158 job->client = (u32)args->context;
159 job->class = context->client->base.class;
160 job->serialize = true;
161
162 while (num_cmdbufs) {
163 struct drm_tegra_cmdbuf cmdbuf;
164 struct host1x_bo *bo;
165
166 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
167 if (err)
168 goto fail;
169
170 bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
171 if (!bo) {
172 err = -ENOENT;
173 goto fail;
174 }
175
176 host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
177 num_cmdbufs--;
178 cmdbufs++;
179 }
180
181 err = copy_from_user(job->relocarray, relocs,
182 sizeof(*relocs) * num_relocs);
183 if (err)
184 goto fail;
185
186 while (num_relocs--) {
187 struct host1x_reloc *reloc = &job->relocarray[num_relocs];
188 struct host1x_bo *cmdbuf, *target;
189
190 cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
191 target = host1x_bo_lookup(drm, file, (u32)reloc->target);
192
193 reloc->cmdbuf = cmdbuf;
194 reloc->target = target;
195
196 if (!reloc->target || !reloc->cmdbuf) {
197 err = -ENOENT;
198 goto fail;
199 }
200 }
201
202 err = copy_from_user(job->waitchk, waitchks,
203 sizeof(*waitchks) * num_waitchks);
204 if (err)
205 goto fail;
206
207 err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
208 sizeof(syncpt));
209 if (err)
210 goto fail;
211
212 job->is_addr_reg = context->client->ops->is_addr_reg;
213 job->syncpt_incrs = syncpt.incrs;
214 job->syncpt_id = syncpt.id;
215 job->timeout = 10000;
216
217 if (args->timeout && args->timeout < 10000)
218 job->timeout = args->timeout;
219
220 err = host1x_job_pin(job, context->client->base.dev);
221 if (err)
222 goto fail;
223
224 err = host1x_job_submit(job);
225 if (err)
226 goto fail_submit;
227
228 args->fence = job->syncpt_end;
229
230 host1x_job_put(job);
231 return 0;
232
233fail_submit:
234 host1x_job_unpin(job);
235fail:
236 host1x_job_put(job);
237 return err;
238}
239
240
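tegra_drm_submit() above recovers user-space pointers that crossed the ioctl boundary as 64-bit integers, casting back through (uintptr_t). A tiny standalone demonstration of that round trip:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* the submit path round-trips user pointers through u64 fields:
 * userspace stores (uintptr_t)ptr, the kernel casts back the same way */
int main(void)
{
	int payload = 42;
	uint64_t boxed = (uintptr_t)&payload;      /* as stored in args->cmdbufs */
	int *unboxed = (int *)(uintptr_t)boxed;    /* as recovered in the handler */

	assert(unboxed == &payload);
	printf("%d\n", *unboxed);
	return 0;
}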
241#ifdef CONFIG_DRM_TEGRA_STAGING
242static struct tegra_drm_context *tegra_drm_get_context(__u64 context)
243{
244 return (struct tegra_drm_context *)(uintptr_t)context;
245}
246
247static bool tegra_drm_file_owns_context(struct tegra_drm_file *file,
248 struct tegra_drm_context *context)
249{
250 struct tegra_drm_context *ctx;
251
252 list_for_each_entry(ctx, &file->contexts, list)
253 if (ctx == context)
254 return true;
255
256 return false;
257}
258
259static int tegra_gem_create(struct drm_device *drm, void *data,
260 struct drm_file *file)
261{
262 struct drm_tegra_gem_create *args = data;
263 struct tegra_bo *bo;
264
265 bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
266 &args->handle);
267 if (IS_ERR(bo))
268 return PTR_ERR(bo);
269
270 return 0;
271}
272
273static int tegra_gem_mmap(struct drm_device *drm, void *data,
274 struct drm_file *file)
275{
276 struct drm_tegra_gem_mmap *args = data;
277 struct drm_gem_object *gem;
278 struct tegra_bo *bo;
279
280 gem = drm_gem_object_lookup(drm, file, args->handle);
281 if (!gem)
282 return -EINVAL;
283
284 bo = to_tegra_bo(gem);
285
286 args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
287
288 drm_gem_object_unreference(gem);
289
290 return 0;
291}
292
293static int tegra_syncpt_read(struct drm_device *drm, void *data,
294 struct drm_file *file)
295{
296 struct host1x *host = dev_get_drvdata(drm->dev->parent);
297 struct drm_tegra_syncpt_read *args = data;
298 struct host1x_syncpt *sp;
299
300 sp = host1x_syncpt_get(host, args->id);
301 if (!sp)
302 return -EINVAL;
303
304 args->value = host1x_syncpt_read_min(sp);
305 return 0;
306}
307
308static int tegra_syncpt_incr(struct drm_device *drm, void *data,
309 struct drm_file *file)
310{
311 struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
312 struct drm_tegra_syncpt_incr *args = data;
313 struct host1x_syncpt *sp;
314
315 sp = host1x_syncpt_get(host1x, args->id);
316 if (!sp)
317 return -EINVAL;
318
319 return host1x_syncpt_incr(sp);
320}
321
322static int tegra_syncpt_wait(struct drm_device *drm, void *data,
323 struct drm_file *file)
324{
325 struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
326 struct drm_tegra_syncpt_wait *args = data;
327 struct host1x_syncpt *sp;
328
329 sp = host1x_syncpt_get(host1x, args->id);
330 if (!sp)
331 return -EINVAL;
332
333 return host1x_syncpt_wait(sp, args->thresh, args->timeout,
334 &args->value);
335}
336
337static int tegra_open_channel(struct drm_device *drm, void *data,
338 struct drm_file *file)
339{
340 struct tegra_drm_file *fpriv = file->driver_priv;
341 struct tegra_drm *tegra = drm->dev_private;
342 struct drm_tegra_open_channel *args = data;
343 struct tegra_drm_context *context;
344 struct tegra_drm_client *client;
345 int err = -ENODEV;
346
347 context = kzalloc(sizeof(*context), GFP_KERNEL);
348 if (!context)
349 return -ENOMEM;
350
351 list_for_each_entry(client, &tegra->clients, list)
352 if (client->base.class == args->client) {
353 err = client->ops->open_channel(client, context);
354 if (err)
355 break;
356
357 list_add(&context->list, &fpriv->contexts);
358 args->context = (uintptr_t)context;
359 context->client = client;
360 return 0;
361 }
362
363 kfree(context);
364 return err;
365}
366
367static int tegra_close_channel(struct drm_device *drm, void *data,
368 struct drm_file *file)
369{
370 struct tegra_drm_file *fpriv = file->driver_priv;
371 struct drm_tegra_close_channel *args = data;
372 struct tegra_drm_context *context;
373
374 context = tegra_drm_get_context(args->context);
375
376 if (!tegra_drm_file_owns_context(fpriv, context))
377 return -EINVAL;
378
379 list_del(&context->list);
380 tegra_drm_context_free(context);
381
382 return 0;
383}
384
385static int tegra_get_syncpt(struct drm_device *drm, void *data,
386 struct drm_file *file)
387{
388 struct tegra_drm_file *fpriv = file->driver_priv;
389 struct drm_tegra_get_syncpt *args = data;
390 struct tegra_drm_context *context;
391 struct host1x_syncpt *syncpt;
392
393 context = tegra_drm_get_context(args->context);
394
395 if (!tegra_drm_file_owns_context(fpriv, context))
396 return -ENODEV;
397
398 if (args->index >= context->client->base.num_syncpts)
399 return -EINVAL;
400
401 syncpt = context->client->base.syncpts[args->index];
402 args->id = host1x_syncpt_id(syncpt);
403
404 return 0;
405}
406
407static int tegra_submit(struct drm_device *drm, void *data,
408 struct drm_file *file)
409{
410 struct tegra_drm_file *fpriv = file->driver_priv;
411 struct drm_tegra_submit *args = data;
412 struct tegra_drm_context *context;
413
414 context = tegra_drm_get_context(args->context);
415
416 if (!tegra_drm_file_owns_context(fpriv, context))
417 return -ENODEV;
418
419 return context->client->ops->submit(context, args, drm, file);
420}
421
422static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
423 struct drm_file *file)
424{
425 struct tegra_drm_file *fpriv = file->driver_priv;
426 struct drm_tegra_get_syncpt_base *args = data;
427 struct tegra_drm_context *context;
428 struct host1x_syncpt_base *base;
429 struct host1x_syncpt *syncpt;
430
431 context = tegra_drm_get_context(args->context);
432
433 if (!tegra_drm_file_owns_context(fpriv, context))
434 return -ENODEV;
435
436 if (args->syncpt >= context->client->base.num_syncpts)
437 return -EINVAL;
438
439 syncpt = context->client->base.syncpts[args->syncpt];
440
441 base = host1x_syncpt_get_base(syncpt);
442 if (!base)
443 return -ENXIO;
444
445 args->id = host1x_syncpt_base_id(base);
446
447 return 0;
448}
449#endif
450
451static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
452#ifdef CONFIG_DRM_TEGRA_STAGING
453 DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
454 DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
455 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
456 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
457 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
458 DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
459 DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
460 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
461 DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
462 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED),
463#endif
464};
465
466static const struct file_operations tegra_drm_fops = {
467 .owner = THIS_MODULE,
468 .open = drm_open,
469 .release = drm_release,
470 .unlocked_ioctl = drm_ioctl,
471 .mmap = tegra_drm_mmap,
472 .poll = drm_poll,
473 .read = drm_read,
474#ifdef CONFIG_COMPAT
475 .compat_ioctl = drm_compat_ioctl,
476#endif
477 .llseek = noop_llseek,
478};
479
480static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
481{
482 struct drm_crtc *crtc;
483
484 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
485 struct tegra_dc *dc = to_tegra_dc(crtc);
486
487 if (dc->pipe == pipe)
488 return crtc;
489 }
490
491 return NULL;
492}
493
494static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
495{
496 /* TODO: implement real hardware counter using syncpoints */
497 return drm_vblank_count(dev, crtc);
498}
499
500static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
501{
502 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
503 struct tegra_dc *dc = to_tegra_dc(crtc);
504
505 if (!crtc)
506 return -ENODEV;
507
508 tegra_dc_enable_vblank(dc);
509
510 return 0;
511}
512
513static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
514{
515 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
516 struct tegra_dc *dc = to_tegra_dc(crtc);
517
518 if (crtc)
519 tegra_dc_disable_vblank(dc);
520}
521
522static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
523{
524 struct tegra_drm_file *fpriv = file->driver_priv;
525 struct tegra_drm_context *context, *tmp;
526 struct drm_crtc *crtc;
527
528 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
529 tegra_dc_cancel_page_flip(crtc, file);
530
531 list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
532 tegra_drm_context_free(context);
533
534 kfree(fpriv);
535}
536
537#ifdef CONFIG_DEBUG_FS
538static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
539{
540 struct drm_info_node *node = (struct drm_info_node *)s->private;
541 struct drm_device *drm = node->minor->dev;
542 struct drm_framebuffer *fb;
543
544 mutex_lock(&drm->mode_config.fb_lock);
545
546 list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
547 seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
548 fb->base.id, fb->width, fb->height, fb->depth,
549 fb->bits_per_pixel,
550 atomic_read(&fb->refcount.refcount));
551 }
552
553 mutex_unlock(&drm->mode_config.fb_lock);
554
555 return 0;
556}
557
558static struct drm_info_list tegra_debugfs_list[] = {
559 { "framebuffers", tegra_debugfs_framebuffers, 0 },
560};
561
562static int tegra_debugfs_init(struct drm_minor *minor)
563{
564 return drm_debugfs_create_files(tegra_debugfs_list,
565 ARRAY_SIZE(tegra_debugfs_list),
566 minor->debugfs_root, minor);
567}
568
569static void tegra_debugfs_cleanup(struct drm_minor *minor)
570{
571 drm_debugfs_remove_files(tegra_debugfs_list,
572 ARRAY_SIZE(tegra_debugfs_list), minor);
573}
574#endif
575
576struct drm_driver tegra_drm_driver = {
577 .driver_features = DRIVER_MODESET | DRIVER_GEM,
578 .load = tegra_drm_load,
579 .unload = tegra_drm_unload,
580 .open = tegra_drm_open,
581 .preclose = tegra_drm_preclose,
582 .lastclose = tegra_drm_lastclose,
583
584 .get_vblank_counter = tegra_drm_get_vblank_counter,
585 .enable_vblank = tegra_drm_enable_vblank,
586 .disable_vblank = tegra_drm_disable_vblank,
587
588#if defined(CONFIG_DEBUG_FS)
589 .debugfs_init = tegra_debugfs_init,
590 .debugfs_cleanup = tegra_debugfs_cleanup,
591#endif
592
593 .gem_free_object = tegra_bo_free_object,
594 .gem_vm_ops = &tegra_bo_vm_ops,
595 .dumb_create = tegra_bo_dumb_create,
596 .dumb_map_offset = tegra_bo_dumb_map_offset,
597 .dumb_destroy = drm_gem_dumb_destroy,
598
599 .ioctls = tegra_drm_ioctls,
600 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
601 .fops = &tegra_drm_fops,
602
603 .name = DRIVER_NAME,
604 .desc = DRIVER_DESC,
605 .date = DRIVER_DATE,
606 .major = DRIVER_MAJOR,
607 .minor = DRIVER_MINOR,
608 .patchlevel = DRIVER_PATCHLEVEL,
609};
610
611int tegra_drm_register_client(struct tegra_drm *tegra,
612 struct tegra_drm_client *client)
613{
614 mutex_lock(&tegra->clients_lock);
615 list_add_tail(&client->list, &tegra->clients);
616 mutex_unlock(&tegra->clients_lock);
617
618 return 0;
619}
620
621int tegra_drm_unregister_client(struct tegra_drm *tegra,
622 struct tegra_drm_client *client)
623{
624 mutex_lock(&tegra->clients_lock);
625 list_del_init(&client->list);
626 mutex_unlock(&tegra->clients_lock);
627
628 return 0;
629}
630
631static int host1x_drm_probe(struct host1x_device *device)
632{
633 return drm_host1x_init(&tegra_drm_driver, device);
634}
635
636static int host1x_drm_remove(struct host1x_device *device)
637{
638 drm_host1x_exit(&tegra_drm_driver, device);
639
640 return 0;
641}
642
643static const struct of_device_id host1x_drm_subdevs[] = {
644 { .compatible = "nvidia,tegra20-dc", },
645 { .compatible = "nvidia,tegra20-hdmi", },
646 { .compatible = "nvidia,tegra20-gr2d", },
647 { .compatible = "nvidia,tegra20-gr3d", },
648 { .compatible = "nvidia,tegra30-dc", },
649 { .compatible = "nvidia,tegra30-hdmi", },
650 { .compatible = "nvidia,tegra30-gr2d", },
651 { .compatible = "nvidia,tegra30-gr3d", },
652 { .compatible = "nvidia,tegra114-hdmi", },
653 { .compatible = "nvidia,tegra114-gr3d", },
654 { /* sentinel */ }
655};
656
657static struct host1x_driver host1x_drm_driver = {
658 .name = "drm",
659 .probe = host1x_drm_probe,
660 .remove = host1x_drm_remove,
661 .subdevs = host1x_drm_subdevs,
662};
663
664static int __init host1x_drm_init(void)
665{
666 int err;
667
668 err = host1x_driver_register(&host1x_drm_driver);
669 if (err < 0)
670 return err;
671
672 err = platform_driver_register(&tegra_dc_driver);
673 if (err < 0)
674 goto unregister_host1x;
675
676 err = platform_driver_register(&tegra_hdmi_driver);
677 if (err < 0)
678 goto unregister_dc;
679
680 err = platform_driver_register(&tegra_gr2d_driver);
681 if (err < 0)
682 goto unregister_hdmi;
683
684 err = platform_driver_register(&tegra_gr3d_driver);
685 if (err < 0)
686 goto unregister_gr2d;
687
688 return 0;
689
690unregister_gr2d:
691 platform_driver_unregister(&tegra_gr2d_driver);
692unregister_hdmi:
693 platform_driver_unregister(&tegra_hdmi_driver);
694unregister_dc:
695 platform_driver_unregister(&tegra_dc_driver);
696unregister_host1x:
697 host1x_driver_unregister(&host1x_drm_driver);
698 return err;
699}
700module_init(host1x_drm_init);
701
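host1x_drm_init() above uses the conventional goto ladder: each registration step that fails jumps to a label that unwinds everything done so far, in reverse order. A compact runnable model of the pattern:

#include <stdio.h>

static int reg_a(void) { return 0; }
static int reg_b(void) { return -1; }   /* pretend this step fails */
static void unreg_a(void) { puts("undo a"); }

/* same shape as host1x_drm_init(): register in order, unwind in reverse */
static int init(void)
{
	int err;

	err = reg_a();
	if (err)
		return err;

	err = reg_b();
	if (err)
		goto unregister_a;

	return 0;

unregister_a:
	unreg_a();
	return err;
}

int main(void)
{
	printf("init = %d\n", init());
	return 0;
}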
702static void __exit host1x_drm_exit(void)
703{
704 platform_driver_unregister(&tegra_gr3d_driver);
705 platform_driver_unregister(&tegra_gr2d_driver);
706 platform_driver_unregister(&tegra_hdmi_driver);
707 platform_driver_unregister(&tegra_dc_driver);
708 host1x_driver_unregister(&host1x_drm_driver);
709}
710module_exit(host1x_drm_exit);
711
712MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
713MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
714MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/host1x/drm/drm.h b/drivers/gpu/drm/tegra/drm.h
index 02ce020f2575..fdfe259ed7f8 100644
--- a/drivers/gpu/host1x/drm/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -10,14 +10,14 @@
10#ifndef HOST1X_DRM_H 10#ifndef HOST1X_DRM_H
11#define HOST1X_DRM_H 1 11#define HOST1X_DRM_H 1
12 12
13#include <uapi/drm/tegra_drm.h>
14#include <linux/host1x.h>
15
13#include <drm/drmP.h> 16#include <drm/drmP.h>
14#include <drm/drm_crtc_helper.h> 17#include <drm/drm_crtc_helper.h>
15#include <drm/drm_edid.h> 18#include <drm/drm_edid.h>
16#include <drm/drm_fb_helper.h> 19#include <drm/drm_fb_helper.h>
17#include <drm/drm_fixed.h> 20#include <drm/drm_fixed.h>
18#include <uapi/drm/tegra_drm.h>
19
20#include "host1x.h"
21 21
22struct tegra_fb { 22struct tegra_fb {
23 struct drm_framebuffer base; 23 struct drm_framebuffer base;
@@ -30,17 +30,8 @@ struct tegra_fbdev {
30 struct tegra_fb *fb; 30 struct tegra_fb *fb;
31}; 31};
32 32
33struct host1x_drm { 33struct tegra_drm {
34 struct drm_device *drm; 34 struct drm_device *drm;
35 struct device *dev;
36 void __iomem *regs;
37 struct clk *clk;
38 int syncpt;
39 int irq;
40
41 struct mutex drm_clients_lock;
42 struct list_head drm_clients;
43 struct list_head drm_active;
44 35
45 struct mutex clients_lock; 36 struct mutex clients_lock;
46 struct list_head clients; 37 struct list_head clients;
@@ -48,66 +39,60 @@ struct host1x_drm {
48 struct tegra_fbdev *fbdev; 39 struct tegra_fbdev *fbdev;
49}; 40};
50 41
51struct host1x_client; 42struct tegra_drm_client;
52 43
53struct host1x_drm_context { 44struct tegra_drm_context {
54 struct host1x_client *client; 45 struct tegra_drm_client *client;
55 struct host1x_channel *channel; 46 struct host1x_channel *channel;
56 struct list_head list; 47 struct list_head list;
57}; 48};
58 49
59struct host1x_client_ops { 50struct tegra_drm_client_ops {
60 int (*drm_init)(struct host1x_client *client, struct drm_device *drm); 51 int (*open_channel)(struct tegra_drm_client *client,
61 int (*drm_exit)(struct host1x_client *client); 52 struct tegra_drm_context *context);
62 int (*open_channel)(struct host1x_client *client, 53 void (*close_channel)(struct tegra_drm_context *context);
63 struct host1x_drm_context *context); 54 int (*is_addr_reg)(struct device *dev, u32 class, u32 offset);
64 void (*close_channel)(struct host1x_drm_context *context); 55 int (*submit)(struct tegra_drm_context *context,
65 int (*submit)(struct host1x_drm_context *context,
66 struct drm_tegra_submit *args, struct drm_device *drm, 56 struct drm_tegra_submit *args, struct drm_device *drm,
67 struct drm_file *file); 57 struct drm_file *file);
68}; 58};
69 59
70struct host1x_drm_file { 60int tegra_drm_submit(struct tegra_drm_context *context,
71 struct list_head contexts; 61 struct drm_tegra_submit *args, struct drm_device *drm,
72}; 62 struct drm_file *file);
73
74struct host1x_client {
75 struct host1x_drm *host1x;
76 struct device *dev;
77
78 const struct host1x_client_ops *ops;
79
80 enum host1x_class class;
81 struct host1x_channel *channel;
82
83 struct host1x_syncpt **syncpts;
84 unsigned int num_syncpts;
85 63
64struct tegra_drm_client {
65 struct host1x_client base;
86 struct list_head list; 66 struct list_head list;
67
68 const struct tegra_drm_client_ops *ops;
87}; 69};
88 70
89extern int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm); 71static inline struct tegra_drm_client *
90extern int host1x_drm_exit(struct host1x_drm *host1x); 72host1x_to_drm_client(struct host1x_client *client)
73{
74 return container_of(client, struct tegra_drm_client, base);
75}
76
77extern int tegra_drm_register_client(struct tegra_drm *tegra,
78 struct tegra_drm_client *client);
79extern int tegra_drm_unregister_client(struct tegra_drm *tegra,
80 struct tegra_drm_client *client);
91 81
92extern int host1x_register_client(struct host1x_drm *host1x, 82extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
93 struct host1x_client *client); 83extern int tegra_drm_exit(struct tegra_drm *tegra);
94extern int host1x_unregister_client(struct host1x_drm *host1x,
95 struct host1x_client *client);
96 84
97struct tegra_output; 85struct tegra_output;
98 86
99struct tegra_dc { 87struct tegra_dc {
100 struct host1x_client client; 88 struct host1x_client client;
101 spinlock_t lock;
102
103 struct host1x_drm *host1x;
104 struct device *dev; 89 struct device *dev;
90 spinlock_t lock;
105 91
106 struct drm_crtc base; 92 struct drm_crtc base;
107 int pipe; 93 int pipe;
108 94
109 struct clk *clk; 95 struct clk *clk;
110
111 void __iomem *regs; 96 void __iomem *regs;
112 int irq; 97 int irq;
113 98
@@ -123,7 +108,8 @@ struct tegra_dc {
123 struct drm_pending_vblank_event *event; 108 struct drm_pending_vblank_event *event;
124}; 109};
125 110
126static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client) 111static inline struct tegra_dc *
112host1x_client_to_dc(struct host1x_client *client)
127{ 113{
128 return container_of(client, struct tegra_dc, client); 114 return container_of(client, struct tegra_dc, client);
129} 115}
@@ -162,6 +148,8 @@ struct tegra_dc_window {
162 unsigned int format; 148 unsigned int format;
163 unsigned int stride[2]; 149 unsigned int stride[2];
164 unsigned long base[3]; 150 unsigned long base[3];
151 bool bottom_up;
152 bool tiled;
165}; 153};
166 154
167/* from dc.c */ 155/* from dc.c */
@@ -249,23 +237,34 @@ static inline int tegra_output_check_mode(struct tegra_output *output,
249 return output ? -ENOSYS : -EINVAL; 237 return output ? -ENOSYS : -EINVAL;
250} 238}
251 239
240/* from bus.c */
241int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device);
242void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device);
243
252/* from rgb.c */ 244/* from rgb.c */
253extern int tegra_dc_rgb_probe(struct tegra_dc *dc); 245extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
246extern int tegra_dc_rgb_remove(struct tegra_dc *dc);
254extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc); 247extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
255extern int tegra_dc_rgb_exit(struct tegra_dc *dc); 248extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
256 249
257/* from output.c */ 250/* from output.c */
258extern int tegra_output_parse_dt(struct tegra_output *output); 251extern int tegra_output_probe(struct tegra_output *output);
252extern int tegra_output_remove(struct tegra_output *output);
259extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output); 253extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
260extern int tegra_output_exit(struct tegra_output *output); 254extern int tegra_output_exit(struct tegra_output *output);
261 255
262/* from fb.c */ 256/* from fb.c */
263struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, 257struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
264 unsigned int index); 258 unsigned int index);
259bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
260bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
265extern int tegra_drm_fb_init(struct drm_device *drm); 261extern int tegra_drm_fb_init(struct drm_device *drm);
266extern void tegra_drm_fb_exit(struct drm_device *drm); 262extern void tegra_drm_fb_exit(struct drm_device *drm);
267extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev); 263extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
268 264
269extern struct drm_driver tegra_drm_driver; 265extern struct platform_driver tegra_dc_driver;
266extern struct platform_driver tegra_hdmi_driver;
267extern struct platform_driver tegra_gr2d_driver;
268extern struct platform_driver tegra_gr3d_driver;
270 269
271#endif /* HOST1X_DRM_H */ 270#endif /* HOST1X_DRM_H */
diff --git a/drivers/gpu/host1x/drm/fb.c b/drivers/gpu/drm/tegra/fb.c
index 979a3e32b78b..490f7719e317 100644
--- a/drivers/gpu/host1x/drm/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -10,8 +10,6 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
14
15#include "drm.h" 13#include "drm.h"
16#include "gem.h" 14#include "gem.h"
17 15
@@ -36,6 +34,26 @@ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
36 return fb->planes[index]; 34 return fb->planes[index];
37} 35}
38 36
37bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
38{
39 struct tegra_fb *fb = to_tegra_fb(framebuffer);
40
41 if (fb->planes[0]->flags & TEGRA_BO_BOTTOM_UP)
42 return true;
43
44 return false;
45}
46
47bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer)
48{
49 struct tegra_fb *fb = to_tegra_fb(framebuffer);
50
51 if (fb->planes[0]->flags & TEGRA_BO_TILED)
52 return true;
53
54 return false;
55}
56
39static void tegra_fb_destroy(struct drm_framebuffer *framebuffer) 57static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
40{ 58{
41 struct tegra_fb *fb = to_tegra_fb(framebuffer); 59 struct tegra_fb *fb = to_tegra_fb(framebuffer);
@@ -190,7 +208,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
190 208
191 size = cmd.pitches[0] * cmd.height; 209 size = cmd.pitches[0] * cmd.height;
192 210
193 bo = tegra_bo_create(drm, size); 211 bo = tegra_bo_create(drm, size, 0);
194 if (IS_ERR(bo)) 212 if (IS_ERR(bo))
195 return PTR_ERR(bo); 213 return PTR_ERR(bo);
196 214
@@ -323,10 +341,10 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
323 341
324static void tegra_fb_output_poll_changed(struct drm_device *drm) 342static void tegra_fb_output_poll_changed(struct drm_device *drm)
325{ 343{
326 struct host1x_drm *host1x = drm->dev_private; 344 struct tegra_drm *tegra = drm->dev_private;
327 345
328 if (host1x->fbdev) 346 if (tegra->fbdev)
329 drm_fb_helper_hotplug_event(&host1x->fbdev->base); 347 drm_fb_helper_hotplug_event(&tegra->fbdev->base);
330} 348}
331 349
332static const struct drm_mode_config_funcs tegra_drm_mode_funcs = { 350static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
@@ -336,7 +354,7 @@ static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
336 354
337int tegra_drm_fb_init(struct drm_device *drm) 355int tegra_drm_fb_init(struct drm_device *drm)
338{ 356{
339 struct host1x_drm *host1x = drm->dev_private; 357 struct tegra_drm *tegra = drm->dev_private;
340 struct tegra_fbdev *fbdev; 358 struct tegra_fbdev *fbdev;
341 359
342 drm->mode_config.min_width = 0; 360 drm->mode_config.min_width = 0;
@@ -352,16 +370,16 @@ int tegra_drm_fb_init(struct drm_device *drm)
352 if (IS_ERR(fbdev)) 370 if (IS_ERR(fbdev))
353 return PTR_ERR(fbdev); 371 return PTR_ERR(fbdev);
354 372
355 host1x->fbdev = fbdev; 373 tegra->fbdev = fbdev;
356 374
357 return 0; 375 return 0;
358} 376}
359 377
360void tegra_drm_fb_exit(struct drm_device *drm) 378void tegra_drm_fb_exit(struct drm_device *drm)
361{ 379{
362 struct host1x_drm *host1x = drm->dev_private; 380 struct tegra_drm *tegra = drm->dev_private;
363 381
364 tegra_fbdev_free(host1x->fbdev); 382 tegra_fbdev_free(tegra->fbdev);
365} 383}
366 384
367void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev) 385void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/drm/tegra/gem.c
index 59623de4ee15..28a9cbc07ab9 100644
--- a/drivers/gpu/host1x/drm/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -18,25 +18,18 @@
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 */ 19 */
20 20
21#include <linux/mm.h> 21#include <drm/tegra_drm.h>
22#include <linux/slab.h>
23#include <linux/mutex.h>
24#include <linux/export.h>
25#include <linux/dma-mapping.h>
26
27#include <drm/drmP.h>
28#include <drm/drm.h>
29 22
30#include "gem.h" 23#include "gem.h"
31 24
32static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo) 25static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
33{ 26{
34 return container_of(bo, struct tegra_bo, base); 27 return container_of(bo, struct tegra_bo, base);
35} 28}
36 29
37static void tegra_bo_put(struct host1x_bo *bo) 30static void tegra_bo_put(struct host1x_bo *bo)
38{ 31{
39 struct tegra_bo *obj = host1x_to_drm_bo(bo); 32 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
40 struct drm_device *drm = obj->gem.dev; 33 struct drm_device *drm = obj->gem.dev;
41 34
42 mutex_lock(&drm->struct_mutex); 35 mutex_lock(&drm->struct_mutex);
@@ -46,7 +39,7 @@ static void tegra_bo_put(struct host1x_bo *bo)
46 39
47static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt) 40static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
48{ 41{
49 struct tegra_bo *obj = host1x_to_drm_bo(bo); 42 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
50 43
51 return obj->paddr; 44 return obj->paddr;
52} 45}
@@ -57,7 +50,7 @@ static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
57 50
58static void *tegra_bo_mmap(struct host1x_bo *bo) 51static void *tegra_bo_mmap(struct host1x_bo *bo)
59{ 52{
60 struct tegra_bo *obj = host1x_to_drm_bo(bo); 53 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
61 54
62 return obj->vaddr; 55 return obj->vaddr;
63} 56}
@@ -68,7 +61,7 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
68 61
69static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page) 62static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
70{ 63{
71 struct tegra_bo *obj = host1x_to_drm_bo(bo); 64 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
72 65
73 return obj->vaddr + page * PAGE_SIZE; 66 return obj->vaddr + page * PAGE_SIZE;
74} 67}
@@ -80,7 +73,7 @@ static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
80 73
81static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo) 74static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
82{ 75{
83 struct tegra_bo *obj = host1x_to_drm_bo(bo); 76 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
84 struct drm_device *drm = obj->gem.dev; 77 struct drm_device *drm = obj->gem.dev;
85 78
86 mutex_lock(&drm->struct_mutex); 79 mutex_lock(&drm->struct_mutex);
@@ -106,7 +99,8 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
106 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr); 99 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
107} 100}
108 101
109struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size) 102struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
103 unsigned long flags)
110{ 104{
111 struct tegra_bo *bo; 105 struct tegra_bo *bo;
112 int err; 106 int err;
@@ -135,6 +129,12 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
135 if (err) 129 if (err)
136 goto err_mmap; 130 goto err_mmap;
137 131
132 if (flags & DRM_TEGRA_GEM_CREATE_TILED)
133 bo->flags |= TEGRA_BO_TILED;
134
135 if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
136 bo->flags |= TEGRA_BO_BOTTOM_UP;
137
138 return bo; 138 return bo;
139 139
140err_mmap: 140err_mmap:
@@ -149,14 +149,15 @@ err_dma:
149} 149}
150 150
151struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, 151struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
152 struct drm_device *drm, 152 struct drm_device *drm,
153 unsigned int size, 153 unsigned int size,
154 unsigned int *handle) 154 unsigned long flags,
155 unsigned int *handle)
155{ 156{
156 struct tegra_bo *bo; 157 struct tegra_bo *bo;
157 int ret; 158 int ret;
158 159
159 bo = tegra_bo_create(drm, size); 160 bo = tegra_bo_create(drm, size, flags);
160 if (IS_ERR(bo)) 161 if (IS_ERR(bo))
161 return bo; 162 return bo;
162 163
@@ -178,7 +179,6 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
178 struct tegra_bo *bo = to_tegra_bo(gem); 179 struct tegra_bo *bo = to_tegra_bo(gem);
179 180
180 drm_gem_free_mmap_offset(gem); 181 drm_gem_free_mmap_offset(gem);
181
182 drm_gem_object_release(gem); 182 drm_gem_object_release(gem);
183 tegra_bo_destroy(gem->dev, bo); 183 tegra_bo_destroy(gem->dev, bo);
184 184
@@ -197,8 +197,8 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
197 if (args->size < args->pitch * args->height) 197 if (args->size < args->pitch * args->height)
198 args->size = args->pitch * args->height; 198 args->size = args->pitch * args->height;
199 199
200 bo = tegra_bo_create_with_handle(file, drm, args->size, 200 bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
201 &args->handle); 201 &args->handle);
202 if (IS_ERR(bo)) 202 if (IS_ERR(bo))
203 return PTR_ERR(bo); 203 return PTR_ERR(bo);
204 204
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/drm/tegra/gem.h
index 492533a2dacb..7674000bf47d 100644
--- a/drivers/gpu/host1x/drm/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -19,14 +19,18 @@
19#ifndef __HOST1X_GEM_H 19#ifndef __HOST1X_GEM_H
20#define __HOST1X_GEM_H 20#define __HOST1X_GEM_H
21 21
22#include <linux/host1x.h>
23
22#include <drm/drm.h> 24#include <drm/drm.h>
23#include <drm/drmP.h> 25#include <drm/drmP.h>
24 26
25#include "host1x_bo.h" 27#define TEGRA_BO_TILED (1 << 0)
28#define TEGRA_BO_BOTTOM_UP (1 << 1)
26 29
27struct tegra_bo { 30struct tegra_bo {
28 struct drm_gem_object gem; 31 struct drm_gem_object gem;
29 struct host1x_bo base; 32 struct host1x_bo base;
33 unsigned long flags;
30 dma_addr_t paddr; 34 dma_addr_t paddr;
31 void *vaddr; 35 void *vaddr;
32}; 36};
@@ -38,11 +42,13 @@ static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
38 42
39extern const struct host1x_bo_ops tegra_bo_ops; 43extern const struct host1x_bo_ops tegra_bo_ops;
40 44
41struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size); 45struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
46 unsigned long flags);
42struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, 47struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
43 struct drm_device *drm, 48 struct drm_device *drm,
44 unsigned int size, 49 unsigned int size,
45 unsigned int *handle); 50 unsigned long flags,
51 unsigned int *handle);
46void tegra_bo_free_object(struct drm_gem_object *gem); 52void tegra_bo_free_object(struct drm_gem_object *gem);
47int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, 53int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
48 struct drm_mode_create_dumb *args); 54 struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
new file mode 100644
index 000000000000..7ec4259ffded
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -0,0 +1,227 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/clk.h>
18
19#include "drm.h"
20#include "gem.h"
21#include "gr2d.h"
22
23struct gr2d {
24 struct tegra_drm_client client;
25 struct host1x_channel *channel;
26 struct clk *clk;
27
28 DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS);
29};
30
31static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
32{
33 return container_of(client, struct gr2d, client);
34}
35
36static int gr2d_init(struct host1x_client *client)
37{
38 struct tegra_drm_client *drm = host1x_to_drm_client(client);
39 struct tegra_drm *tegra = dev_get_drvdata(client->parent);
40 unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
41 struct gr2d *gr2d = to_gr2d(drm);
42
43 gr2d->channel = host1x_channel_request(client->dev);
44 if (!gr2d->channel)
45 return -ENOMEM;
46
47 client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
48 if (!client->syncpts[0]) {
49 host1x_channel_free(gr2d->channel);
50 return -ENOMEM;
51 }
52
53 return tegra_drm_register_client(tegra, drm);
54}
55
56static int gr2d_exit(struct host1x_client *client)
57{
58 struct tegra_drm_client *drm = host1x_to_drm_client(client);
59 struct tegra_drm *tegra = dev_get_drvdata(client->parent);
60 struct gr2d *gr2d = to_gr2d(drm);
61 int err;
62
63 err = tegra_drm_unregister_client(tegra, drm);
64 if (err < 0)
65 return err;
66
67 host1x_syncpt_free(client->syncpts[0]);
68 host1x_channel_free(gr2d->channel);
69
70 return 0;
71}
72
73static const struct host1x_client_ops gr2d_client_ops = {
74 .init = gr2d_init,
75 .exit = gr2d_exit,
76};
77
78static int gr2d_open_channel(struct tegra_drm_client *client,
79 struct tegra_drm_context *context)
80{
81 struct gr2d *gr2d = to_gr2d(client);
82
83 context->channel = host1x_channel_get(gr2d->channel);
84 if (!context->channel)
85 return -ENOMEM;
86
87 return 0;
88}
89
90static void gr2d_close_channel(struct tegra_drm_context *context)
91{
92 host1x_channel_put(context->channel);
93}
94
95static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 offset)
96{
97 struct gr2d *gr2d = dev_get_drvdata(dev);
98
99 switch (class) {
100 case HOST1X_CLASS_HOST1X:
101 if (offset == 0x2b)
102 return 1;
103
104 break;
105
106 case HOST1X_CLASS_GR2D:
107 case HOST1X_CLASS_GR2D_SB:
108 if (offset >= GR2D_NUM_REGS)
109 break;
110
111 if (test_bit(offset, gr2d->addr_regs))
112 return 1;
113
114 break;
115 }
116
117 return 0;
118}
119
120static const struct tegra_drm_client_ops gr2d_ops = {
121 .open_channel = gr2d_open_channel,
122 .close_channel = gr2d_close_channel,
123 .is_addr_reg = gr2d_is_addr_reg,
124 .submit = tegra_drm_submit,
125};
126
127static const struct of_device_id gr2d_match[] = {
128 { .compatible = "nvidia,tegra30-gr2d" },
129 { .compatible = "nvidia,tegra20-gr2d" },
130 { },
131};
132
133static const u32 gr2d_addr_regs[] = {
134 GR2D_UA_BASE_ADDR,
135 GR2D_VA_BASE_ADDR,
136 GR2D_PAT_BASE_ADDR,
137 GR2D_DSTA_BASE_ADDR,
138 GR2D_DSTB_BASE_ADDR,
139 GR2D_DSTC_BASE_ADDR,
140 GR2D_SRCA_BASE_ADDR,
141 GR2D_SRCB_BASE_ADDR,
142 GR2D_SRC_BASE_ADDR_SB,
143 GR2D_DSTA_BASE_ADDR_SB,
144 GR2D_DSTB_BASE_ADDR_SB,
145 GR2D_UA_BASE_ADDR_SB,
146 GR2D_VA_BASE_ADDR_SB,
147};
148
149static int gr2d_probe(struct platform_device *pdev)
150{
151 struct device *dev = &pdev->dev;
152 struct host1x_syncpt **syncpts;
153 struct gr2d *gr2d;
154 unsigned int i;
155 int err;
156
157 gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
158 if (!gr2d)
159 return -ENOMEM;
160
161 syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
162 if (!syncpts)
163 return -ENOMEM;
164
165 gr2d->clk = devm_clk_get(dev, NULL);
166 if (IS_ERR(gr2d->clk)) {
167 dev_err(dev, "cannot get clock\n");
168 return PTR_ERR(gr2d->clk);
169 }
170
171 err = clk_prepare_enable(gr2d->clk);
172 if (err) {
173 dev_err(dev, "cannot turn on clock\n");
174 return err;
175 }
176
177 INIT_LIST_HEAD(&gr2d->client.base.list);
178 gr2d->client.base.ops = &gr2d_client_ops;
179 gr2d->client.base.dev = dev;
180 gr2d->client.base.class = HOST1X_CLASS_GR2D;
181 gr2d->client.base.syncpts = syncpts;
182 gr2d->client.base.num_syncpts = 1;
183
184 INIT_LIST_HEAD(&gr2d->client.list);
185 gr2d->client.ops = &gr2d_ops;
186
187 err = host1x_client_register(&gr2d->client.base);
188 if (err < 0) {
189 dev_err(dev, "failed to register host1x client: %d\n", err);
190 clk_disable_unprepare(gr2d->clk);
191 return err;
192 }
193
194 /* initialize address register map */
195 for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++)
196 set_bit(gr2d_addr_regs[i], gr2d->addr_regs);
197
198 platform_set_drvdata(pdev, gr2d);
199
200 return 0;
201}
202
203static int gr2d_remove(struct platform_device *pdev)
204{
205 struct gr2d *gr2d = platform_get_drvdata(pdev);
206 int err;
207
208 err = host1x_client_unregister(&gr2d->client.base);
209 if (err < 0) {
210 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
211 err);
212 return err;
213 }
214
215 clk_disable_unprepare(gr2d->clk);
216
217 return 0;
218}
219
220struct platform_driver tegra_gr2d_driver = {
221 .driver = {
222 .name = "tegra-gr2d",
223 .of_match_table = gr2d_match,
224 },
225 .probe = gr2d_probe,
226 .remove = gr2d_remove,
227};
diff --git a/drivers/gpu/drm/tegra/gr2d.h b/drivers/gpu/drm/tegra/gr2d.h
new file mode 100644
index 000000000000..4d7304fb015e
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr2d.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) 2013 NVIDIA Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef TEGRA_GR2D_H
10#define TEGRA_GR2D_H
11
12#define GR2D_UA_BASE_ADDR 0x1a
13#define GR2D_VA_BASE_ADDR 0x1b
14#define GR2D_PAT_BASE_ADDR 0x26
15#define GR2D_DSTA_BASE_ADDR 0x2b
16#define GR2D_DSTB_BASE_ADDR 0x2c
17#define GR2D_DSTC_BASE_ADDR 0x2d
18#define GR2D_SRCA_BASE_ADDR 0x31
19#define GR2D_SRCB_BASE_ADDR 0x32
20#define GR2D_SRC_BASE_ADDR_SB 0x48
21#define GR2D_DSTA_BASE_ADDR_SB 0x49
22#define GR2D_DSTB_BASE_ADDR_SB 0x4a
23#define GR2D_UA_BASE_ADDR_SB 0x4b
24#define GR2D_VA_BASE_ADDR_SB 0x4c
25
26#define GR2D_NUM_REGS 0x4d
27
28#endif
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
new file mode 100644
index 000000000000..4cec8f526af7
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -0,0 +1,338 @@
1/*
2 * Copyright (C) 2013 Avionic Design GmbH
3 * Copyright (C) 2013 NVIDIA Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/clk.h>
11#include <linux/host1x.h>
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/tegra-powergate.h>
15
16#include "drm.h"
17#include "gem.h"
18#include "gr3d.h"
19
20struct gr3d {
21 struct tegra_drm_client client;
22 struct host1x_channel *channel;
23 struct clk *clk_secondary;
24 struct clk *clk;
25
26 DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
27};
28
29static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
30{
31 return container_of(client, struct gr3d, client);
32}
33
34static int gr3d_init(struct host1x_client *client)
35{
36 struct tegra_drm_client *drm = host1x_to_drm_client(client);
37 struct tegra_drm *tegra = dev_get_drvdata(client->parent);
38 unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
39 struct gr3d *gr3d = to_gr3d(drm);
40
41 gr3d->channel = host1x_channel_request(client->dev);
42 if (!gr3d->channel)
43 return -ENOMEM;
44
45 client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
46 if (!client->syncpts[0]) {
47 host1x_channel_free(gr3d->channel);
48 return -ENOMEM;
49 }
50
51 return tegra_drm_register_client(tegra, drm);
52}
53
54static int gr3d_exit(struct host1x_client *client)
55{
56 struct tegra_drm_client *drm = host1x_to_drm_client(client);
57 struct tegra_drm *tegra = dev_get_drvdata(client->parent);
58 struct gr3d *gr3d = to_gr3d(drm);
59 int err;
60
61 err = tegra_drm_unregister_client(tegra, drm);
62 if (err < 0)
63 return err;
64
65 host1x_syncpt_free(client->syncpts[0]);
66 host1x_channel_free(gr3d->channel);
67
68 return 0;
69}
70
71static const struct host1x_client_ops gr3d_client_ops = {
72 .init = gr3d_init,
73 .exit = gr3d_exit,
74};
75
76static int gr3d_open_channel(struct tegra_drm_client *client,
77 struct tegra_drm_context *context)
78{
79 struct gr3d *gr3d = to_gr3d(client);
80
81 context->channel = host1x_channel_get(gr3d->channel);
82 if (!context->channel)
83 return -ENOMEM;
84
85 return 0;
86}
87
88static void gr3d_close_channel(struct tegra_drm_context *context)
89{
90 host1x_channel_put(context->channel);
91}
92
93static int gr3d_is_addr_reg(struct device *dev, u32 class, u32 offset)
94{
95 struct gr3d *gr3d = dev_get_drvdata(dev);
96
97 switch (class) {
98 case HOST1X_CLASS_HOST1X:
99 if (offset == 0x2b)
100 return 1;
101
102 break;
103
104 case HOST1X_CLASS_GR3D:
105 if (offset >= GR3D_NUM_REGS)
106 break;
107
108 if (test_bit(offset, gr3d->addr_regs))
109 return 1;
110
111 break;
112 }
113
114 return 0;
115}
116
117static const struct tegra_drm_client_ops gr3d_ops = {
118 .open_channel = gr3d_open_channel,
119 .close_channel = gr3d_close_channel,
120 .is_addr_reg = gr3d_is_addr_reg,
121 .submit = tegra_drm_submit,
122};
123
124static const struct of_device_id tegra_gr3d_match[] = {
125 { .compatible = "nvidia,tegra114-gr3d" },
126 { .compatible = "nvidia,tegra30-gr3d" },
127 { .compatible = "nvidia,tegra20-gr3d" },
128 { }
129};
130
131static const u32 gr3d_addr_regs[] = {
132 GR3D_IDX_ATTRIBUTE( 0),
133 GR3D_IDX_ATTRIBUTE( 1),
134 GR3D_IDX_ATTRIBUTE( 2),
135 GR3D_IDX_ATTRIBUTE( 3),
136 GR3D_IDX_ATTRIBUTE( 4),
137 GR3D_IDX_ATTRIBUTE( 5),
138 GR3D_IDX_ATTRIBUTE( 6),
139 GR3D_IDX_ATTRIBUTE( 7),
140 GR3D_IDX_ATTRIBUTE( 8),
141 GR3D_IDX_ATTRIBUTE( 9),
142 GR3D_IDX_ATTRIBUTE(10),
143 GR3D_IDX_ATTRIBUTE(11),
144 GR3D_IDX_ATTRIBUTE(12),
145 GR3D_IDX_ATTRIBUTE(13),
146 GR3D_IDX_ATTRIBUTE(14),
147 GR3D_IDX_ATTRIBUTE(15),
148 GR3D_IDX_INDEX_BASE,
149 GR3D_QR_ZTAG_ADDR,
150 GR3D_QR_CTAG_ADDR,
151 GR3D_QR_CZ_ADDR,
152 GR3D_TEX_TEX_ADDR( 0),
153 GR3D_TEX_TEX_ADDR( 1),
154 GR3D_TEX_TEX_ADDR( 2),
155 GR3D_TEX_TEX_ADDR( 3),
156 GR3D_TEX_TEX_ADDR( 4),
157 GR3D_TEX_TEX_ADDR( 5),
158 GR3D_TEX_TEX_ADDR( 6),
159 GR3D_TEX_TEX_ADDR( 7),
160 GR3D_TEX_TEX_ADDR( 8),
161 GR3D_TEX_TEX_ADDR( 9),
162 GR3D_TEX_TEX_ADDR(10),
163 GR3D_TEX_TEX_ADDR(11),
164 GR3D_TEX_TEX_ADDR(12),
165 GR3D_TEX_TEX_ADDR(13),
166 GR3D_TEX_TEX_ADDR(14),
167 GR3D_TEX_TEX_ADDR(15),
168 GR3D_DW_MEMORY_OUTPUT_ADDRESS,
169 GR3D_GLOBAL_SURFADDR( 0),
170 GR3D_GLOBAL_SURFADDR( 1),
171 GR3D_GLOBAL_SURFADDR( 2),
172 GR3D_GLOBAL_SURFADDR( 3),
173 GR3D_GLOBAL_SURFADDR( 4),
174 GR3D_GLOBAL_SURFADDR( 5),
175 GR3D_GLOBAL_SURFADDR( 6),
176 GR3D_GLOBAL_SURFADDR( 7),
177 GR3D_GLOBAL_SURFADDR( 8),
178 GR3D_GLOBAL_SURFADDR( 9),
179 GR3D_GLOBAL_SURFADDR(10),
180 GR3D_GLOBAL_SURFADDR(11),
181 GR3D_GLOBAL_SURFADDR(12),
182 GR3D_GLOBAL_SURFADDR(13),
183 GR3D_GLOBAL_SURFADDR(14),
184 GR3D_GLOBAL_SURFADDR(15),
185 GR3D_GLOBAL_SPILLSURFADDR,
186 GR3D_GLOBAL_SURFOVERADDR( 0),
187 GR3D_GLOBAL_SURFOVERADDR( 1),
188 GR3D_GLOBAL_SURFOVERADDR( 2),
189 GR3D_GLOBAL_SURFOVERADDR( 3),
190 GR3D_GLOBAL_SURFOVERADDR( 4),
191 GR3D_GLOBAL_SURFOVERADDR( 5),
192 GR3D_GLOBAL_SURFOVERADDR( 6),
193 GR3D_GLOBAL_SURFOVERADDR( 7),
194 GR3D_GLOBAL_SURFOVERADDR( 8),
195 GR3D_GLOBAL_SURFOVERADDR( 9),
196 GR3D_GLOBAL_SURFOVERADDR(10),
197 GR3D_GLOBAL_SURFOVERADDR(11),
198 GR3D_GLOBAL_SURFOVERADDR(12),
199 GR3D_GLOBAL_SURFOVERADDR(13),
200 GR3D_GLOBAL_SURFOVERADDR(14),
201 GR3D_GLOBAL_SURFOVERADDR(15),
202 GR3D_GLOBAL_SAMP01SURFADDR( 0),
203 GR3D_GLOBAL_SAMP01SURFADDR( 1),
204 GR3D_GLOBAL_SAMP01SURFADDR( 2),
205 GR3D_GLOBAL_SAMP01SURFADDR( 3),
206 GR3D_GLOBAL_SAMP01SURFADDR( 4),
207 GR3D_GLOBAL_SAMP01SURFADDR( 5),
208 GR3D_GLOBAL_SAMP01SURFADDR( 6),
209 GR3D_GLOBAL_SAMP01SURFADDR( 7),
210 GR3D_GLOBAL_SAMP01SURFADDR( 8),
211 GR3D_GLOBAL_SAMP01SURFADDR( 9),
212 GR3D_GLOBAL_SAMP01SURFADDR(10),
213 GR3D_GLOBAL_SAMP01SURFADDR(11),
214 GR3D_GLOBAL_SAMP01SURFADDR(12),
215 GR3D_GLOBAL_SAMP01SURFADDR(13),
216 GR3D_GLOBAL_SAMP01SURFADDR(14),
217 GR3D_GLOBAL_SAMP01SURFADDR(15),
218 GR3D_GLOBAL_SAMP23SURFADDR( 0),
219 GR3D_GLOBAL_SAMP23SURFADDR( 1),
220 GR3D_GLOBAL_SAMP23SURFADDR( 2),
221 GR3D_GLOBAL_SAMP23SURFADDR( 3),
222 GR3D_GLOBAL_SAMP23SURFADDR( 4),
223 GR3D_GLOBAL_SAMP23SURFADDR( 5),
224 GR3D_GLOBAL_SAMP23SURFADDR( 6),
225 GR3D_GLOBAL_SAMP23SURFADDR( 7),
226 GR3D_GLOBAL_SAMP23SURFADDR( 8),
227 GR3D_GLOBAL_SAMP23SURFADDR( 9),
228 GR3D_GLOBAL_SAMP23SURFADDR(10),
229 GR3D_GLOBAL_SAMP23SURFADDR(11),
230 GR3D_GLOBAL_SAMP23SURFADDR(12),
231 GR3D_GLOBAL_SAMP23SURFADDR(13),
232 GR3D_GLOBAL_SAMP23SURFADDR(14),
233 GR3D_GLOBAL_SAMP23SURFADDR(15),
234};
235
236static int gr3d_probe(struct platform_device *pdev)
237{
238 struct device_node *np = pdev->dev.of_node;
239 struct host1x_syncpt **syncpts;
240 struct gr3d *gr3d;
241 unsigned int i;
242 int err;
243
244 gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
245 if (!gr3d)
246 return -ENOMEM;
247
248 syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
249 if (!syncpts)
250 return -ENOMEM;
251
252 gr3d->clk = devm_clk_get(&pdev->dev, NULL);
253 if (IS_ERR(gr3d->clk)) {
254 dev_err(&pdev->dev, "cannot get clock\n");
255 return PTR_ERR(gr3d->clk);
256 }
257
258 if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) {
259 gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2");
260		if (IS_ERR(gr3d->clk_secondary)) {
261			dev_err(&pdev->dev, "cannot get secondary clock\n");
262			return PTR_ERR(gr3d->clk_secondary);
263 }
264 }
265
266 err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk);
267 if (err < 0) {
268 dev_err(&pdev->dev, "failed to power up 3D unit\n");
269 return err;
270 }
271
272 if (gr3d->clk_secondary) {
273 err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D1,
274 gr3d->clk_secondary);
275 if (err < 0) {
276 dev_err(&pdev->dev,
277 "failed to power up secondary 3D unit\n");
278 return err;
279 }
280 }
281
282 INIT_LIST_HEAD(&gr3d->client.base.list);
283 gr3d->client.base.ops = &gr3d_client_ops;
284 gr3d->client.base.dev = &pdev->dev;
285 gr3d->client.base.class = HOST1X_CLASS_GR3D;
286 gr3d->client.base.syncpts = syncpts;
287 gr3d->client.base.num_syncpts = 1;
288
289 INIT_LIST_HEAD(&gr3d->client.list);
290 gr3d->client.ops = &gr3d_ops;
291
292 err = host1x_client_register(&gr3d->client.base);
293 if (err < 0) {
294 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
295 err);
296 return err;
297 }
298
299 /* initialize address register map */
300 for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++)
301 set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
302
303 platform_set_drvdata(pdev, gr3d);
304
305 return 0;
306}
307
308static int gr3d_remove(struct platform_device *pdev)
309{
310 struct gr3d *gr3d = platform_get_drvdata(pdev);
311 int err;
312
313 err = host1x_client_unregister(&gr3d->client.base);
314 if (err < 0) {
315 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
316 err);
317 return err;
318 }
319
320 if (gr3d->clk_secondary) {
321 tegra_powergate_power_off(TEGRA_POWERGATE_3D1);
322 clk_disable_unprepare(gr3d->clk_secondary);
323 }
324
325 tegra_powergate_power_off(TEGRA_POWERGATE_3D);
326 clk_disable_unprepare(gr3d->clk);
327
328 return 0;
329}
330
331struct platform_driver tegra_gr3d_driver = {
332 .driver = {
333 .name = "tegra-gr3d",
334 .of_match_table = tegra_gr3d_match,
335 },
336 .probe = gr3d_probe,
337 .remove = gr3d_remove,
338};
diff --git a/drivers/gpu/drm/tegra/gr3d.h b/drivers/gpu/drm/tegra/gr3d.h
new file mode 100644
index 000000000000..0c30a1351c83
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr3d.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright (C) 2013 NVIDIA Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef TEGRA_GR3D_H
10#define TEGRA_GR3D_H
11
12#define GR3D_IDX_ATTRIBUTE(x) (0x100 + (x) * 2)
13#define GR3D_IDX_INDEX_BASE 0x121
14#define GR3D_QR_ZTAG_ADDR 0x415
15#define GR3D_QR_CTAG_ADDR 0x417
16#define GR3D_QR_CZ_ADDR 0x419
17#define GR3D_TEX_TEX_ADDR(x) (0x710 + (x))
18#define GR3D_DW_MEMORY_OUTPUT_ADDRESS 0x904
19#define GR3D_GLOBAL_SURFADDR(x) (0xe00 + (x))
20#define GR3D_GLOBAL_SPILLSURFADDR 0xe2a
21#define GR3D_GLOBAL_SURFOVERADDR(x) (0xe30 + (x))
22#define GR3D_GLOBAL_SAMP01SURFADDR(x) (0xe50 + (x))
23#define GR3D_GLOBAL_SAMP23SURFADDR(x) (0xe60 + (x))
24
25#define GR3D_NUM_REGS 0xe88
26
27#endif
diff --git a/drivers/gpu/host1x/drm/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 644d95c7d489..0cd9bc2056e8 100644
--- a/drivers/gpu/host1x/drm/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -8,21 +8,33 @@
8 */ 8 */
9 9
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/clk/tegra.h>
11#include <linux/debugfs.h> 12#include <linux/debugfs.h>
12#include <linux/gpio.h>
13#include <linux/hdmi.h> 13#include <linux/hdmi.h>
14#include <linux/module.h>
15#include <linux/of.h>
16#include <linux/platform_device.h>
17#include <linux/regulator/consumer.h> 14#include <linux/regulator/consumer.h>
18#include <linux/clk/tegra.h>
19
20#include <drm/drm_edid.h>
21 15
22#include "hdmi.h" 16#include "hdmi.h"
23#include "drm.h" 17#include "drm.h"
24#include "dc.h" 18#include "dc.h"
25#include "host1x_client.h" 19
20struct tmds_config {
21 unsigned int pclk;
22 u32 pll0;
23 u32 pll1;
24 u32 pe_current;
25 u32 drive_current;
26 u32 peak_current;
27};
28
29struct tegra_hdmi_config {
30 const struct tmds_config *tmds;
31 unsigned int num_tmds;
32
33 unsigned long fuse_override_offset;
34 unsigned long fuse_override_value;
35
36 bool has_sor_io_peak_current;
37};
26 38
27struct tegra_hdmi { 39struct tegra_hdmi {
28 struct host1x_client client; 40 struct host1x_client client;
@@ -38,6 +50,8 @@ struct tegra_hdmi {
38 struct clk *clk_parent; 50 struct clk *clk_parent;
39 struct clk *clk; 51 struct clk *clk;
40 52
53 const struct tegra_hdmi_config *config;
54
41 unsigned int audio_source; 55 unsigned int audio_source;
42 unsigned int audio_freq; 56 unsigned int audio_freq;
43 bool stereo; 57 bool stereo;
@@ -143,15 +157,7 @@ static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
143 { 0, 0, 0, 0 }, 157 { 0, 0, 0, 0 },
144}; 158};
145 159
146struct tmds_config { 160static const struct tmds_config tegra20_tmds_config[] = {
147 unsigned int pclk;
148 u32 pll0;
149 u32 pll1;
150 u32 pe_current;
151 u32 drive_current;
152};
153
154static const struct tmds_config tegra2_tmds_config[] = {
155 { /* slow pixel clock modes */ 161 { /* slow pixel clock modes */
156 .pclk = 27000000, 162 .pclk = 27000000,
157 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | 163 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
@@ -184,7 +190,7 @@ static const struct tmds_config tegra2_tmds_config[] = {
184 }, 190 },
185}; 191};
186 192
187static const struct tmds_config tegra3_tmds_config[] = { 193static const struct tmds_config tegra30_tmds_config[] = {
188 { /* 480p modes */ 194 { /* 480p modes */
189 .pclk = 27000000, 195 .pclk = 27000000,
190 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | 196 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
@@ -230,6 +236,85 @@ static const struct tmds_config tegra3_tmds_config[] = {
230 }, 236 },
231}; 237};
232 238
239static const struct tmds_config tegra114_tmds_config[] = {
240 { /* 480p/576p / 25.2MHz/27MHz modes */
241 .pclk = 27000000,
242 .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
243 SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL,
244 .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0),
245 .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
246 PE_CURRENT1(PE_CURRENT_0_mA_T114) |
247 PE_CURRENT2(PE_CURRENT_0_mA_T114) |
248 PE_CURRENT3(PE_CURRENT_0_mA_T114),
249 .drive_current =
250 DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
251 DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
252 DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
253 DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
254 .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
255 PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
256 PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
257 PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
258 }, { /* 720p / 74.25MHz modes */
259 .pclk = 74250000,
260 .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
261 SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL,
262 .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
263 SOR_PLL_TMDS_TERMADJ(0),
264 .pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) |
265 PE_CURRENT1(PE_CURRENT_15_mA_T114) |
266 PE_CURRENT2(PE_CURRENT_15_mA_T114) |
267 PE_CURRENT3(PE_CURRENT_15_mA_T114),
268 .drive_current =
269 DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
270 DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
271 DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
272 DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
273 .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
274 PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
275 PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
276 PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
277 }, { /* 1080p / 148.5MHz modes */
278 .pclk = 148500000,
279 .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
280 SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL,
281 .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
282 SOR_PLL_TMDS_TERMADJ(0),
283 .pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) |
284 PE_CURRENT1(PE_CURRENT_10_mA_T114) |
285 PE_CURRENT2(PE_CURRENT_10_mA_T114) |
286 PE_CURRENT3(PE_CURRENT_10_mA_T114),
287 .drive_current =
288 DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) |
289 DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) |
290 DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) |
291 DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114),
292 .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
293 PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
294 PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
295 PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
296 }, { /* 225/297MHz modes */
297 .pclk = UINT_MAX,
298 .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
299 SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL,
300		.pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7) |
301			SOR_PLL_TMDS_TERM_ENABLE,
302 .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
303 PE_CURRENT1(PE_CURRENT_0_mA_T114) |
304 PE_CURRENT2(PE_CURRENT_0_mA_T114) |
305 PE_CURRENT3(PE_CURRENT_0_mA_T114),
306 .drive_current =
307 DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) |
308 DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) |
309 DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) |
310 DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114),
311 .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) |
312 PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) |
313 PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) |
314 PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA),
315 },
316};
317
233static const struct tegra_hdmi_audio_config * 318static const struct tegra_hdmi_audio_config *
234tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk) 319tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
235{ 320{
@@ -511,7 +596,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
511 596
512 err = hdmi_audio_infoframe_init(&frame); 597 err = hdmi_audio_infoframe_init(&frame);
513 if (err < 0) { 598 if (err < 0) {
514 dev_err(hdmi->dev, "failed to initialize audio infoframe: %d\n", 599 dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
515 err); 600 err);
516 return; 601 return;
517 } 602 }
@@ -531,7 +616,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
531 * contain 7 bytes. Including the 3 byte header only the first 10 616 * contain 7 bytes. Including the 3 byte header only the first 10
532 * bytes can be programmed. 617 * bytes can be programmed.
533 */ 618 */
534 tegra_hdmi_write_infopack(hdmi, buffer, min(10, err)); 619 tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
535 620
536 tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, 621 tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
537 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); 622 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
@@ -577,8 +662,28 @@ static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
577 tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1); 662 tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
578 tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT); 663 tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
579 664
580 value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE; 665 tegra_hdmi_writel(hdmi, tmds->drive_current,
581 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT); 666 HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
667
668 value = tegra_hdmi_readl(hdmi, hdmi->config->fuse_override_offset);
669 value |= hdmi->config->fuse_override_value;
670 tegra_hdmi_writel(hdmi, value, hdmi->config->fuse_override_offset);
671
672 if (hdmi->config->has_sor_io_peak_current)
673 tegra_hdmi_writel(hdmi, tmds->peak_current,
674 HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
675}
676
677static bool tegra_output_is_hdmi(struct tegra_output *output)
678{
679 struct edid *edid;
680
681 if (!output->connector.edid_blob_ptr)
682 return false;
683
684 edid = (struct edid *)output->connector.edid_blob_ptr->data;
685
686 return drm_detect_hdmi_monitor(edid);
582} 687}
583 688
584static int tegra_output_hdmi_enable(struct tegra_output *output) 689static int tegra_output_hdmi_enable(struct tegra_output *output)
@@ -589,23 +694,17 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
589 struct tegra_hdmi *hdmi = to_hdmi(output); 694 struct tegra_hdmi *hdmi = to_hdmi(output);
590 struct device_node *node = hdmi->dev->of_node; 695 struct device_node *node = hdmi->dev->of_node;
591 unsigned int pulse_start, div82, pclk; 696 unsigned int pulse_start, div82, pclk;
592 const struct tmds_config *tmds;
593 unsigned int num_tmds;
594 unsigned long value; 697 unsigned long value;
595 int retries = 1000; 698 int retries = 1000;
596 int err; 699 int err;
597 700
701 hdmi->dvi = !tegra_output_is_hdmi(output);
702
598 pclk = mode->clock * 1000; 703 pclk = mode->clock * 1000;
599 h_sync_width = mode->hsync_end - mode->hsync_start; 704 h_sync_width = mode->hsync_end - mode->hsync_start;
600 h_back_porch = mode->htotal - mode->hsync_end; 705 h_back_porch = mode->htotal - mode->hsync_end;
601 h_front_porch = mode->hsync_start - mode->hdisplay; 706 h_front_porch = mode->hsync_start - mode->hdisplay;
602 707
603 err = regulator_enable(hdmi->vdd);
604 if (err < 0) {
605 dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
606 return err;
607 }
608
609 err = regulator_enable(hdmi->pll); 708 err = regulator_enable(hdmi->pll);
610 if (err < 0) { 709 if (err < 0) {
611 dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err); 710 dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
@@ -710,17 +809,9 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
710 tegra_hdmi_setup_stereo_infoframe(hdmi); 809 tegra_hdmi_setup_stereo_infoframe(hdmi);
711 810
712 /* TMDS CONFIG */ 811 /* TMDS CONFIG */
713 if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { 812 for (i = 0; i < hdmi->config->num_tmds; i++) {
714 num_tmds = ARRAY_SIZE(tegra3_tmds_config); 813 if (pclk <= hdmi->config->tmds[i].pclk) {
715 tmds = tegra3_tmds_config; 814 tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
716 } else {
717 num_tmds = ARRAY_SIZE(tegra2_tmds_config);
718 tmds = tegra2_tmds_config;
719 }
720
721 for (i = 0; i < num_tmds; i++) {
722 if (pclk <= tmds[i].pclk) {
723 tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
724 break; 815 break;
725 } 816 }
726 } 817 }
@@ -824,7 +915,6 @@ static int tegra_output_hdmi_disable(struct tegra_output *output)
824 tegra_periph_reset_assert(hdmi->clk); 915 tegra_periph_reset_assert(hdmi->clk);
825 clk_disable(hdmi->clk); 916 clk_disable(hdmi->clk);
826 regulator_disable(hdmi->pll); 917 regulator_disable(hdmi->pll);
827 regulator_disable(hdmi->vdd);
828 918
829 return 0; 919 return 0;
830} 920}
@@ -1055,6 +1145,7 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
1055 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0); 1145 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
1056 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR); 1146 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
1057 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE); 1147 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
1148 DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
1058 1149
1059#undef DUMP_REG 1150#undef DUMP_REG
1060 1151
@@ -1122,24 +1213,31 @@ static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
1122 return 0; 1213 return 0;
1123} 1214}
1124 1215
1125static int tegra_hdmi_drm_init(struct host1x_client *client, 1216static int tegra_hdmi_init(struct host1x_client *client)
1126 struct drm_device *drm)
1127{ 1217{
1218 struct tegra_drm *tegra = dev_get_drvdata(client->parent);
1128 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); 1219 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
1129 int err; 1220 int err;
1130 1221
1222 err = regulator_enable(hdmi->vdd);
1223 if (err < 0) {
1224 dev_err(client->dev, "failed to enable VDD regulator: %d\n",
1225 err);
1226 return err;
1227 }
1228
1131 hdmi->output.type = TEGRA_OUTPUT_HDMI; 1229 hdmi->output.type = TEGRA_OUTPUT_HDMI;
1132 hdmi->output.dev = client->dev; 1230 hdmi->output.dev = client->dev;
1133 hdmi->output.ops = &hdmi_ops; 1231 hdmi->output.ops = &hdmi_ops;
1134 1232
1135 err = tegra_output_init(drm, &hdmi->output); 1233 err = tegra_output_init(tegra->drm, &hdmi->output);
1136 if (err < 0) { 1234 if (err < 0) {
1137 dev_err(client->dev, "output setup failed: %d\n", err); 1235 dev_err(client->dev, "output setup failed: %d\n", err);
1138 return err; 1236 return err;
1139 } 1237 }
1140 1238
1141 if (IS_ENABLED(CONFIG_DEBUG_FS)) { 1239 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1142 err = tegra_hdmi_debugfs_init(hdmi, drm->primary); 1240 err = tegra_hdmi_debugfs_init(hdmi, tegra->drm->primary);
1143 if (err < 0) 1241 if (err < 0)
1144 dev_err(client->dev, "debugfs setup failed: %d\n", err); 1242 dev_err(client->dev, "debugfs setup failed: %d\n", err);
1145 } 1243 }
@@ -1147,7 +1245,7 @@ static int tegra_hdmi_drm_init(struct host1x_client *client,
1147 return 0; 1245 return 0;
1148} 1246}
1149 1247
1150static int tegra_hdmi_drm_exit(struct host1x_client *client) 1248static int tegra_hdmi_exit(struct host1x_client *client)
1151{ 1249{
1152 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); 1250 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
1153 int err; 1251 int err;
@@ -1171,25 +1269,63 @@ static int tegra_hdmi_drm_exit(struct host1x_client *client)
1171 return err; 1269 return err;
1172 } 1270 }
1173 1271
1272 regulator_disable(hdmi->vdd);
1273
1174 return 0; 1274 return 0;
1175} 1275}
1176 1276
1177static const struct host1x_client_ops hdmi_client_ops = { 1277static const struct host1x_client_ops hdmi_client_ops = {
1178 .drm_init = tegra_hdmi_drm_init, 1278 .init = tegra_hdmi_init,
1179 .drm_exit = tegra_hdmi_drm_exit, 1279 .exit = tegra_hdmi_exit,
1280};
1281
1282static const struct tegra_hdmi_config tegra20_hdmi_config = {
1283 .tmds = tegra20_tmds_config,
1284 .num_tmds = ARRAY_SIZE(tegra20_tmds_config),
1285 .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
1286	.fuse_override_value = 1UL << 31,
1287 .has_sor_io_peak_current = false,
1288};
1289
1290static const struct tegra_hdmi_config tegra30_hdmi_config = {
1291 .tmds = tegra30_tmds_config,
1292 .num_tmds = ARRAY_SIZE(tegra30_tmds_config),
1293 .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
1294	.fuse_override_value = 1UL << 31,
1295 .has_sor_io_peak_current = false,
1296};
1297
1298static const struct tegra_hdmi_config tegra114_hdmi_config = {
1299 .tmds = tegra114_tmds_config,
1300 .num_tmds = ARRAY_SIZE(tegra114_tmds_config),
1301 .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
1302	.fuse_override_value = 1UL << 31,
1303 .has_sor_io_peak_current = true,
1304};
1305
1306static const struct of_device_id tegra_hdmi_of_match[] = {
1307 { .compatible = "nvidia,tegra114-hdmi", .data = &tegra114_hdmi_config },
1308 { .compatible = "nvidia,tegra30-hdmi", .data = &tegra30_hdmi_config },
1309 { .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
1310 { },
1180}; 1311};
1181 1312
1182static int tegra_hdmi_probe(struct platform_device *pdev) 1313static int tegra_hdmi_probe(struct platform_device *pdev)
1183{ 1314{
1184 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent); 1315 const struct of_device_id *match;
1185 struct tegra_hdmi *hdmi; 1316 struct tegra_hdmi *hdmi;
1186 struct resource *regs; 1317 struct resource *regs;
1187 int err; 1318 int err;
1188 1319
1320 match = of_match_node(tegra_hdmi_of_match, pdev->dev.of_node);
1321 if (!match)
1322 return -ENODEV;
1323
1189 hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); 1324 hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
1190 if (!hdmi) 1325 if (!hdmi)
1191 return -ENOMEM; 1326 return -ENOMEM;
1192 1327
1328 hdmi->config = match->data;
1193 hdmi->dev = &pdev->dev; 1329 hdmi->dev = &pdev->dev;
1194 hdmi->audio_source = AUTO; 1330 hdmi->audio_source = AUTO;
1195 hdmi->audio_freq = 44100; 1331 hdmi->audio_freq = 44100;
@@ -1234,7 +1370,7 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
1234 1370
1235 hdmi->output.dev = &pdev->dev; 1371 hdmi->output.dev = &pdev->dev;
1236 1372
1237 err = tegra_output_parse_dt(&hdmi->output); 1373 err = tegra_output_probe(&hdmi->output);
1238 if (err < 0) 1374 if (err < 0)
1239 return err; 1375 return err;
1240 1376
@@ -1252,11 +1388,11 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
1252 1388
1253 hdmi->irq = err; 1389 hdmi->irq = err;
1254 1390
1255 hdmi->client.ops = &hdmi_client_ops;
1256 INIT_LIST_HEAD(&hdmi->client.list); 1391 INIT_LIST_HEAD(&hdmi->client.list);
1392 hdmi->client.ops = &hdmi_client_ops;
1257 hdmi->client.dev = &pdev->dev; 1393 hdmi->client.dev = &pdev->dev;
1258 1394
1259 err = host1x_register_client(host1x, &hdmi->client); 1395 err = host1x_client_register(&hdmi->client);
1260 if (err < 0) { 1396 if (err < 0) {
1261 dev_err(&pdev->dev, "failed to register host1x client: %d\n", 1397 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1262 err); 1398 err);
@@ -1270,29 +1406,28 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
1270 1406
1271static int tegra_hdmi_remove(struct platform_device *pdev) 1407static int tegra_hdmi_remove(struct platform_device *pdev)
1272{ 1408{
1273 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
1274 struct tegra_hdmi *hdmi = platform_get_drvdata(pdev); 1409 struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
1275 int err; 1410 int err;
1276 1411
1277 err = host1x_unregister_client(host1x, &hdmi->client); 1412 err = host1x_client_unregister(&hdmi->client);
1278 if (err < 0) { 1413 if (err < 0) {
1279 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", 1414 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
1280 err); 1415 err);
1281 return err; 1416 return err;
1282 } 1417 }
1283 1418
1419 err = tegra_output_remove(&hdmi->output);
1420 if (err < 0) {
1421 dev_err(&pdev->dev, "failed to remove output: %d\n", err);
1422 return err;
1423 }
1424
1284 clk_unprepare(hdmi->clk_parent); 1425 clk_unprepare(hdmi->clk_parent);
1285 clk_unprepare(hdmi->clk); 1426 clk_unprepare(hdmi->clk);
1286 1427
1287 return 0; 1428 return 0;
1288} 1429}
1289 1430
1290static struct of_device_id tegra_hdmi_of_match[] = {
1291 { .compatible = "nvidia,tegra30-hdmi", },
1292 { .compatible = "nvidia,tegra20-hdmi", },
1293 { },
1294};
1295
1296struct platform_driver tegra_hdmi_driver = { 1431struct platform_driver tegra_hdmi_driver = {
1297 .driver = { 1432 .driver = {
1298 .name = "tegra-hdmi", 1433 .name = "tegra-hdmi",
diff --git a/drivers/gpu/host1x/drm/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index 52ac36e08ccb..0aebc485f7fa 100644
--- a/drivers/gpu/host1x/drm/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -233,7 +233,10 @@
233#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8) 233#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8)
234#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16) 234#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16)
235#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24) 235#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24)
236#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31) 236#define DRIVE_CURRENT_LANE0_T114(x) (((x) & 0x7f) << 0)
237#define DRIVE_CURRENT_LANE1_T114(x) (((x) & 0x7f) << 8)
238#define DRIVE_CURRENT_LANE2_T114(x) (((x) & 0x7f) << 16)
239#define DRIVE_CURRENT_LANE3_T114(x) (((x) & 0x7f) << 24)
237 240
238#define DRIVE_CURRENT_1_500_mA 0x00 241#define DRIVE_CURRENT_1_500_mA 0x00
239#define DRIVE_CURRENT_1_875_mA 0x01 242#define DRIVE_CURRENT_1_875_mA 0x01
@@ -299,6 +302,79 @@
299#define DRIVE_CURRENT_24_375_mA 0x3d 302#define DRIVE_CURRENT_24_375_mA 0x3d
300#define DRIVE_CURRENT_24_750_mA 0x3e 303#define DRIVE_CURRENT_24_750_mA 0x3e
301 304
305#define DRIVE_CURRENT_0_000_mA_T114 0x00
306#define DRIVE_CURRENT_0_400_mA_T114 0x01
307#define DRIVE_CURRENT_0_800_mA_T114 0x02
308#define DRIVE_CURRENT_1_200_mA_T114 0x03
309#define DRIVE_CURRENT_1_600_mA_T114 0x04
310#define DRIVE_CURRENT_2_000_mA_T114 0x05
311#define DRIVE_CURRENT_2_400_mA_T114 0x06
312#define DRIVE_CURRENT_2_800_mA_T114 0x07
313#define DRIVE_CURRENT_3_200_mA_T114 0x08
314#define DRIVE_CURRENT_3_600_mA_T114 0x09
315#define DRIVE_CURRENT_4_000_mA_T114 0x0a
316#define DRIVE_CURRENT_4_400_mA_T114 0x0b
317#define DRIVE_CURRENT_4_800_mA_T114 0x0c
318#define DRIVE_CURRENT_5_200_mA_T114 0x0d
319#define DRIVE_CURRENT_5_600_mA_T114 0x0e
320#define DRIVE_CURRENT_6_000_mA_T114 0x0f
321#define DRIVE_CURRENT_6_400_mA_T114 0x10
322#define DRIVE_CURRENT_6_800_mA_T114 0x11
323#define DRIVE_CURRENT_7_200_mA_T114 0x12
324#define DRIVE_CURRENT_7_600_mA_T114 0x13
325#define DRIVE_CURRENT_8_000_mA_T114 0x14
326#define DRIVE_CURRENT_8_400_mA_T114 0x15
327#define DRIVE_CURRENT_8_800_mA_T114 0x16
328#define DRIVE_CURRENT_9_200_mA_T114 0x17
329#define DRIVE_CURRENT_9_600_mA_T114 0x18
330#define DRIVE_CURRENT_10_000_mA_T114 0x19
331#define DRIVE_CURRENT_10_400_mA_T114 0x1a
332#define DRIVE_CURRENT_10_800_mA_T114 0x1b
333#define DRIVE_CURRENT_11_200_mA_T114 0x1c
334#define DRIVE_CURRENT_11_600_mA_T114 0x1d
335#define DRIVE_CURRENT_12_000_mA_T114 0x1e
336#define DRIVE_CURRENT_12_400_mA_T114 0x1f
337#define DRIVE_CURRENT_12_800_mA_T114 0x20
338#define DRIVE_CURRENT_13_200_mA_T114 0x21
339#define DRIVE_CURRENT_13_600_mA_T114 0x22
340#define DRIVE_CURRENT_14_000_mA_T114 0x23
341#define DRIVE_CURRENT_14_400_mA_T114 0x24
342#define DRIVE_CURRENT_14_800_mA_T114 0x25
343#define DRIVE_CURRENT_15_200_mA_T114 0x26
344#define DRIVE_CURRENT_15_600_mA_T114 0x27
345#define DRIVE_CURRENT_16_000_mA_T114 0x28
346#define DRIVE_CURRENT_16_400_mA_T114 0x29
347#define DRIVE_CURRENT_16_800_mA_T114 0x2a
348#define DRIVE_CURRENT_17_200_mA_T114 0x2b
349#define DRIVE_CURRENT_17_600_mA_T114 0x2c
350#define DRIVE_CURRENT_18_000_mA_T114 0x2d
351#define DRIVE_CURRENT_18_400_mA_T114 0x2e
352#define DRIVE_CURRENT_18_800_mA_T114 0x2f
353#define DRIVE_CURRENT_19_200_mA_T114 0x30
354#define DRIVE_CURRENT_19_600_mA_T114 0x31
355#define DRIVE_CURRENT_20_000_mA_T114 0x32
356#define DRIVE_CURRENT_20_400_mA_T114 0x33
357#define DRIVE_CURRENT_20_800_mA_T114 0x34
358#define DRIVE_CURRENT_21_200_mA_T114 0x35
359#define DRIVE_CURRENT_21_600_mA_T114 0x36
360#define DRIVE_CURRENT_22_000_mA_T114 0x37
361#define DRIVE_CURRENT_22_400_mA_T114 0x38
362#define DRIVE_CURRENT_22_800_mA_T114 0x39
363#define DRIVE_CURRENT_23_200_mA_T114 0x3a
364#define DRIVE_CURRENT_23_600_mA_T114 0x3b
365#define DRIVE_CURRENT_24_000_mA_T114 0x3c
366#define DRIVE_CURRENT_24_400_mA_T114 0x3d
367#define DRIVE_CURRENT_24_800_mA_T114 0x3e
368#define DRIVE_CURRENT_25_200_mA_T114 0x3f
369#define DRIVE_CURRENT_25_400_mA_T114 0x40
370#define DRIVE_CURRENT_25_800_mA_T114 0x41
371#define DRIVE_CURRENT_26_200_mA_T114 0x42
372#define DRIVE_CURRENT_26_600_mA_T114 0x43
373#define DRIVE_CURRENT_27_000_mA_T114 0x44
374#define DRIVE_CURRENT_27_400_mA_T114 0x45
375#define DRIVE_CURRENT_27_800_mA_T114 0x46
376#define DRIVE_CURRENT_28_200_mA_T114 0x47
377
302#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f 378#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f
303#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80 379#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80
304#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81 380#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81
@@ -358,6 +434,23 @@
358#define PE_CURRENT_7_0_mA 0xe 434#define PE_CURRENT_7_0_mA 0xe
359#define PE_CURRENT_7_5_mA 0xf 435#define PE_CURRENT_7_5_mA 0xf
360 436
437#define PE_CURRENT_0_mA_T114 0x0
438#define PE_CURRENT_1_mA_T114 0x1
439#define PE_CURRENT_2_mA_T114 0x2
440#define PE_CURRENT_3_mA_T114 0x3
441#define PE_CURRENT_4_mA_T114 0x4
442#define PE_CURRENT_5_mA_T114 0x5
443#define PE_CURRENT_6_mA_T114 0x6
444#define PE_CURRENT_7_mA_T114 0x7
445#define PE_CURRENT_8_mA_T114 0x8
446#define PE_CURRENT_9_mA_T114 0x9
447#define PE_CURRENT_10_mA_T114 0xa
448#define PE_CURRENT_11_mA_T114 0xb
449#define PE_CURRENT_12_mA_T114 0xc
450#define PE_CURRENT_13_mA_T114 0xd
451#define PE_CURRENT_14_mA_T114 0xe
452#define PE_CURRENT_15_mA_T114 0xf
453
361#define HDMI_NV_PDISP_KEY_CTRL 0x9a 454#define HDMI_NV_PDISP_KEY_CTRL 0x9a
362#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b 455#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b
363#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c 456#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c
@@ -383,4 +476,61 @@
383#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5 476#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5
384#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5 477#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
385 478
479#define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT 0xd1
480#define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) << 0)
481#define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) << 8)
482#define PEAK_CURRENT_LANE2(x) (((x) & 0x7f) << 16)
483#define PEAK_CURRENT_LANE3(x) (((x) & 0x7f) << 24)
484
485#define PEAK_CURRENT_0_000_mA 0x00
486#define PEAK_CURRENT_0_200_mA 0x01
487#define PEAK_CURRENT_0_400_mA 0x02
488#define PEAK_CURRENT_0_600_mA 0x03
489#define PEAK_CURRENT_0_800_mA 0x04
490#define PEAK_CURRENT_1_000_mA 0x05
491#define PEAK_CURRENT_1_200_mA 0x06
492#define PEAK_CURRENT_1_400_mA 0x07
493#define PEAK_CURRENT_1_600_mA 0x08
494#define PEAK_CURRENT_1_800_mA 0x09
495#define PEAK_CURRENT_2_000_mA 0x0a
496#define PEAK_CURRENT_2_200_mA 0x0b
497#define PEAK_CURRENT_2_400_mA 0x0c
498#define PEAK_CURRENT_2_600_mA 0x0d
499#define PEAK_CURRENT_2_800_mA 0x0e
500#define PEAK_CURRENT_3_000_mA 0x0f
501#define PEAK_CURRENT_3_200_mA 0x10
502#define PEAK_CURRENT_3_400_mA 0x11
503#define PEAK_CURRENT_3_600_mA 0x12
504#define PEAK_CURRENT_3_800_mA 0x13
505#define PEAK_CURRENT_4_000_mA 0x14
506#define PEAK_CURRENT_4_200_mA 0x15
507#define PEAK_CURRENT_4_400_mA 0x16
508#define PEAK_CURRENT_4_600_mA 0x17
509#define PEAK_CURRENT_4_800_mA 0x18
510#define PEAK_CURRENT_5_000_mA 0x19
511#define PEAK_CURRENT_5_200_mA 0x1a
512#define PEAK_CURRENT_5_400_mA 0x1b
513#define PEAK_CURRENT_5_600_mA 0x1c
514#define PEAK_CURRENT_5_800_mA 0x1d
515#define PEAK_CURRENT_6_000_mA 0x1e
516#define PEAK_CURRENT_6_200_mA 0x1f
517#define PEAK_CURRENT_6_400_mA 0x20
518#define PEAK_CURRENT_6_600_mA 0x21
519#define PEAK_CURRENT_6_800_mA 0x22
520#define PEAK_CURRENT_7_000_mA 0x23
521#define PEAK_CURRENT_7_200_mA 0x24
522#define PEAK_CURRENT_7_400_mA 0x25
523#define PEAK_CURRENT_7_600_mA 0x26
524#define PEAK_CURRENT_7_800_mA 0x27
525#define PEAK_CURRENT_8_000_mA 0x28
526#define PEAK_CURRENT_8_200_mA 0x29
527#define PEAK_CURRENT_8_400_mA 0x2a
528#define PEAK_CURRENT_8_600_mA 0x2b
529#define PEAK_CURRENT_8_800_mA 0x2c
530#define PEAK_CURRENT_9_000_mA 0x2d
531#define PEAK_CURRENT_9_200_mA 0x2e
532#define PEAK_CURRENT_9_400_mA 0x2f
533
534#define HDMI_NV_PDISP_SOR_PAD_CTLS0 0xd2
535
386#endif /* TEGRA_HDMI_H */ 536#endif /* TEGRA_HDMI_H */
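
The T114 variants added above widen each per-lane field from six to seven bits while keeping the one-byte-per-lane layout, so a single 32-bit register write still configures all four lanes. A minimal, self-contained sketch of how the fields compose (the macros are copied from the header; the chosen current value and the printed label are illustrative only, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Per-lane drive-current packing, Tegra114 style: seven bits per lane,
 * one byte per lane. Macros copied from the header above. */
#define DRIVE_CURRENT_LANE0_T114(x) (((x) & 0x7f) << 0)
#define DRIVE_CURRENT_LANE1_T114(x) (((x) & 0x7f) << 8)
#define DRIVE_CURRENT_LANE2_T114(x) (((x) & 0x7f) << 16)
#define DRIVE_CURRENT_LANE3_T114(x) (((x) & 0x7f) << 24)

#define DRIVE_CURRENT_10_000_mA_T114 0x19

int main(void)
{
	/* All four lanes driven at 10 mA; each code step is 0.4 mA. */
	uint32_t value =
		DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_000_mA_T114) |
		DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_000_mA_T114) |
		DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_000_mA_T114) |
		DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_000_mA_T114);

	printf("drive current register value: 0x%08x\n", (unsigned)value);
	/* prints 0x19191919 */
	return 0;
}

The PEAK_CURRENT_LANEx() macros introduced further down follow the same scheme.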
diff --git a/drivers/gpu/host1x/drm/output.c b/drivers/gpu/drm/tegra/output.c
index 137ae81ab80e..2cb0065e0578 100644
--- a/drivers/gpu/host1x/drm/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -7,9 +7,7 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#include <linux/module.h>
11#include <linux/of_gpio.h> 10#include <linux/of_gpio.h>
12#include <linux/i2c.h>
13 11
14#include "drm.h" 12#include "drm.h"
15 13
@@ -81,10 +79,16 @@ tegra_connector_detect(struct drm_connector *connector, bool force)
81 return status; 79 return status;
82} 80}
83 81
82static void drm_connector_clear(struct drm_connector *connector)
83{
84 memset(connector, 0, sizeof(*connector));
85}
86
84static void tegra_connector_destroy(struct drm_connector *connector) 87static void tegra_connector_destroy(struct drm_connector *connector)
85{ 88{
86 drm_sysfs_connector_remove(connector); 89 drm_sysfs_connector_remove(connector);
87 drm_connector_cleanup(connector); 90 drm_connector_cleanup(connector);
91 drm_connector_clear(connector);
88} 92}
89 93
90static const struct drm_connector_funcs connector_funcs = { 94static const struct drm_connector_funcs connector_funcs = {
@@ -94,9 +98,15 @@ static const struct drm_connector_funcs connector_funcs = {
94 .destroy = tegra_connector_destroy, 98 .destroy = tegra_connector_destroy,
95}; 99};
96 100
101static void drm_encoder_clear(struct drm_encoder *encoder)
102{
103 memset(encoder, 0, sizeof(*encoder));
104}
105
97static void tegra_encoder_destroy(struct drm_encoder *encoder) 106static void tegra_encoder_destroy(struct drm_encoder *encoder)
98{ 107{
99 drm_encoder_cleanup(encoder); 108 drm_encoder_cleanup(encoder);
109 drm_encoder_clear(encoder);
100} 110}
101 111
102static const struct drm_encoder_funcs encoder_funcs = { 112static const struct drm_encoder_funcs encoder_funcs = {
@@ -151,7 +161,7 @@ static irqreturn_t hpd_irq(int irq, void *data)
151 return IRQ_HANDLED; 161 return IRQ_HANDLED;
152} 162}
153 163
154int tegra_output_parse_dt(struct tegra_output *output) 164int tegra_output_probe(struct tegra_output *output)
155{ 165{
156 enum of_gpio_flags flags; 166 enum of_gpio_flags flags;
157 struct device_node *ddc; 167 struct device_node *ddc;
@@ -181,14 +191,6 @@ int tegra_output_parse_dt(struct tegra_output *output)
181 output->hpd_gpio = of_get_named_gpio_flags(output->of_node, 191 output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
182 "nvidia,hpd-gpio", 0, 192 "nvidia,hpd-gpio", 0,
183 &flags); 193 &flags);
184
185 return 0;
186}
187
188int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
189{
190 int connector, encoder, err;
191
192 if (gpio_is_valid(output->hpd_gpio)) { 194 if (gpio_is_valid(output->hpd_gpio)) {
193 unsigned long flags; 195 unsigned long flags;
194 196
@@ -202,7 +204,8 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
202 err = gpio_to_irq(output->hpd_gpio); 204 err = gpio_to_irq(output->hpd_gpio);
203 if (err < 0) { 205 if (err < 0) {
204 dev_err(output->dev, "gpio_to_irq(): %d\n", err); 206 dev_err(output->dev, "gpio_to_irq(): %d\n", err);
205 goto free_hpd; 207 gpio_free(output->hpd_gpio);
208 return err;
206 } 209 }
207 210
208 output->hpd_irq = err; 211 output->hpd_irq = err;
@@ -215,12 +218,33 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
215 if (err < 0) { 218 if (err < 0) {
216 dev_err(output->dev, "failed to request IRQ#%u: %d\n", 219 dev_err(output->dev, "failed to request IRQ#%u: %d\n",
217 output->hpd_irq, err); 220 output->hpd_irq, err);
218 goto free_hpd; 221 gpio_free(output->hpd_gpio);
222 return err;
219 } 223 }
220 224
221 output->connector.polled = DRM_CONNECTOR_POLL_HPD; 225 output->connector.polled = DRM_CONNECTOR_POLL_HPD;
222 } 226 }
223 227
228 return 0;
229}
230
231int tegra_output_remove(struct tegra_output *output)
232{
233 if (gpio_is_valid(output->hpd_gpio)) {
234 free_irq(output->hpd_irq, output);
235 gpio_free(output->hpd_gpio);
236 }
237
238 if (output->ddc)
239 put_device(&output->ddc->dev);
240
241 return 0;
242}
243
244int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
245{
246 int connector, encoder;
247
224 switch (output->type) { 248 switch (output->type) {
225 case TEGRA_OUTPUT_RGB: 249 case TEGRA_OUTPUT_RGB:
226 connector = DRM_MODE_CONNECTOR_LVDS; 250 connector = DRM_MODE_CONNECTOR_LVDS;
@@ -241,6 +265,7 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
241 drm_connector_init(drm, &output->connector, &connector_funcs, 265 drm_connector_init(drm, &output->connector, &connector_funcs,
242 connector); 266 connector);
243 drm_connector_helper_add(&output->connector, &connector_helper_funcs); 267 drm_connector_helper_add(&output->connector, &connector_helper_funcs);
268 output->connector.dpms = DRM_MODE_DPMS_OFF;
244 269
245 drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder); 270 drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
246 drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs); 271 drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
@@ -251,22 +276,9 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
251 output->encoder.possible_crtcs = 0x3; 276 output->encoder.possible_crtcs = 0x3;
252 277
253 return 0; 278 return 0;
254
255free_hpd:
256 gpio_free(output->hpd_gpio);
257
258 return err;
259} 279}
260 280
261int tegra_output_exit(struct tegra_output *output) 281int tegra_output_exit(struct tegra_output *output)
262{ 282{
263 if (gpio_is_valid(output->hpd_gpio)) {
264 free_irq(output->hpd_irq, output);
265 gpio_free(output->hpd_gpio);
266 }
267
268 if (output->ddc)
269 put_device(&output->ddc->dev);
270
271 return 0; 283 return 0;
272} 284}
diff --git a/drivers/gpu/host1x/drm/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 5aa66ef7a946..ba47ca4fb880 100644
--- a/drivers/gpu/host1x/drm/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -8,9 +8,6 @@
8 */ 8 */
9 9
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/module.h>
12#include <linux/of.h>
13#include <linux/platform_device.h>
14 11
15#include "drm.h" 12#include "drm.h"
16#include "dc.h" 13#include "dc.h"
@@ -150,7 +147,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
150 rgb->output.dev = dc->dev; 147 rgb->output.dev = dc->dev;
151 rgb->output.of_node = np; 148 rgb->output.of_node = np;
152 149
153 err = tegra_output_parse_dt(&rgb->output); 150 err = tegra_output_probe(&rgb->output);
154 if (err < 0) 151 if (err < 0)
155 return err; 152 return err;
156 153
@@ -177,6 +174,20 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
177 return 0; 174 return 0;
178} 175}
179 176
177int tegra_dc_rgb_remove(struct tegra_dc *dc)
178{
179 int err;
180
181 if (!dc->rgb)
182 return 0;
183
184 err = tegra_output_remove(dc->rgb);
185 if (err < 0)
186 return err;
187
188 return 0;
189}
190
180int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) 191int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
181{ 192{
182 struct tegra_rgb *rgb = to_rgb(dc->rgb); 193 struct tegra_rgb *rgb = to_rgb(dc->rgb);
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 7a4d10106906..7c3ef79fcb37 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -2,6 +2,7 @@ config DRM_TILCDC
2 tristate "DRM Support for TI LCDC Display Controller" 2 tristate "DRM Support for TI LCDC Display Controller"
3 depends on DRM && OF && ARM 3 depends on DRM && OF && ARM
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER
5 select DRM_KMS_CMA_HELPER 6 select DRM_KMS_CMA_HELPER
6 select DRM_GEM_CMA_HELPER 7 select DRM_GEM_CMA_HELPER
7 select VIDEOMODE_HELPERS 8 select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b2b33dde2afb..b433b9f040c9 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -5,10 +5,6 @@ ccflags-y := -Iinclude/drm
5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ 5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ 6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
7 ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \ 7 ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
8 ttm_bo_manager.o 8 ttm_bo_manager.o ttm_page_alloc_dma.o
9
10ifeq ($(CONFIG_SWIOTLB),y)
11ttm-y += ttm_page_alloc_dma.o
12endif
13 9
14obj-$(CONFIG_DRM_TTM) += ttm.o 10obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f1a857ec1021..8d5a646ebe6a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -429,8 +429,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
429 sync_obj = driver->sync_obj_ref(bo->sync_obj); 429 sync_obj = driver->sync_obj_ref(bo->sync_obj);
430 spin_unlock(&bdev->fence_lock); 430 spin_unlock(&bdev->fence_lock);
431 431
432 if (!ret) 432 if (!ret) {
433
434 /*
435 * Make NO_EVICT bos immediately available to
436 * shrinkers, now that they are queued for
437 * destruction.
438 */
439 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
440 bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
441 ttm_bo_add_to_lru(bo);
442 }
443
433 ww_mutex_unlock(&bo->resv->lock); 444 ww_mutex_unlock(&bo->resv->lock);
445 }
434 446
435 kref_get(&bo->list_kref); 447 kref_get(&bo->list_kref);
436 list_add_tail(&bo->ddestroy, &bdev->ddestroy); 448 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -986,24 +998,32 @@ out_unlock:
986 return ret; 998 return ret;
987} 999}
988 1000
989static int ttm_bo_mem_compat(struct ttm_placement *placement, 1001static bool ttm_bo_mem_compat(struct ttm_placement *placement,
990 struct ttm_mem_reg *mem) 1002 struct ttm_mem_reg *mem,
1003 uint32_t *new_flags)
991{ 1004{
992 int i; 1005 int i;
993 1006
994 if (mem->mm_node && placement->lpfn != 0 && 1007 if (mem->mm_node && placement->lpfn != 0 &&
995 (mem->start < placement->fpfn || 1008 (mem->start < placement->fpfn ||
996 mem->start + mem->num_pages > placement->lpfn)) 1009 mem->start + mem->num_pages > placement->lpfn))
997 return -1; 1010 return false;
998 1011
999 for (i = 0; i < placement->num_placement; i++) { 1012 for (i = 0; i < placement->num_placement; i++) {
1000 if ((placement->placement[i] & mem->placement & 1013 *new_flags = placement->placement[i];
1001 TTM_PL_MASK_CACHING) && 1014 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1002 (placement->placement[i] & mem->placement & 1015 (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1003 TTM_PL_MASK_MEM)) 1016 return true;
1004 return i;
1005 } 1017 }
1006 return -1; 1018
1019 for (i = 0; i < placement->num_busy_placement; i++) {
1020 *new_flags = placement->busy_placement[i];
1021 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1022 (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1023 return true;
1024 }
1025
1026 return false;
1007} 1027}
1008 1028
1009int ttm_bo_validate(struct ttm_buffer_object *bo, 1029int ttm_bo_validate(struct ttm_buffer_object *bo,
@@ -1012,6 +1032,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1012 bool no_wait_gpu) 1032 bool no_wait_gpu)
1013{ 1033{
1014 int ret; 1034 int ret;
1035 uint32_t new_flags;
1015 1036
1016 lockdep_assert_held(&bo->resv->lock.base); 1037 lockdep_assert_held(&bo->resv->lock.base);
1017 /* Check that range is valid */ 1038 /* Check that range is valid */
@@ -1022,8 +1043,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1022 /* 1043 /*
1023 * Check whether we need to move buffer. 1044 * Check whether we need to move buffer.
1024 */ 1045 */
1025 ret = ttm_bo_mem_compat(placement, &bo->mem); 1046 if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1026 if (ret < 0) {
1027 ret = ttm_bo_move_buffer(bo, placement, interruptible, 1047 ret = ttm_bo_move_buffer(bo, placement, interruptible,
1028 no_wait_gpu); 1048 no_wait_gpu);
1029 if (ret) 1049 if (ret)
@@ -1033,7 +1053,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1033 * Use the access and other non-mapping-related flag bits from 1053 * Use the access and other non-mapping-related flag bits from
1034 * the compatible memory placement flags to the active flags 1054 * the compatible memory placement flags to the active flags
1035 */ 1055 */
1036 ttm_flag_masked(&bo->mem.placement, placement->placement[ret], 1056 ttm_flag_masked(&bo->mem.placement, new_flags,
1037 ~TTM_PL_MASK_MEMTYPE); 1057 ~TTM_PL_MASK_MEMTYPE);
1038 } 1058 }
1039 /* 1059 /*
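
The reworked check changes the contract from "index into the placement array, or -1" to "bool plus the matched flags", and also scans the busy placements, so a buffer that already sits in an acceptable fallback placement is no longer moved. A standalone model of the new semantics (types, masks and flag values below are stand-ins, not the TTM definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PL_MASK_CACHING 0x070000
#define PL_MASK_MEM     0x00ffff

struct placement {
	const uint32_t *placement;
	unsigned num_placement;
	const uint32_t *busy_placement;
	unsigned num_busy_placement;
};

/* Returns true if mem_flags match a preferred or busy placement, and
 * hands back the matched flags for the caller to merge, like the new
 * ttm_bo_mem_compat(). The lpfn range check is omitted for brevity. */
static bool mem_compat(const struct placement *p, uint32_t mem_flags,
		       uint32_t *new_flags)
{
	unsigned i;

	for (i = 0; i < p->num_placement; i++) {
		*new_flags = p->placement[i];
		if ((*new_flags & mem_flags & PL_MASK_CACHING) &&
		    (*new_flags & mem_flags & PL_MASK_MEM))
			return true;
	}
	for (i = 0; i < p->num_busy_placement; i++) {
		*new_flags = p->busy_placement[i];
		if ((*new_flags & mem_flags & PL_MASK_CACHING) &&
		    (*new_flags & mem_flags & PL_MASK_MEM))
			return true;
	}
	return false;
}

int main(void)
{
	const uint32_t preferred[] = { 0x010001 };	/* "VRAM", cached */
	const uint32_t busy[] = { 0x010002 };		/* "TT", cached */
	struct placement p = { preferred, 1, busy, 1 };
	uint32_t new_flags;

	/* Buffer currently in cached "TT": only the busy list matches,
	 * so no move is needed. */
	if (mem_compat(&p, 0x010002, &new_flags))
		printf("compatible, flags 0x%06x\n", (unsigned)new_flags);
	return 0;
}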
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7cc904d3a4d1..4834c463c38b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -343,19 +343,25 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
343 if (ret) 343 if (ret)
344 goto out; 344 goto out;
345 345
346 /*
347 * Single TTM move. NOP.
348 */
346 if (old_iomap == NULL && new_iomap == NULL) 349 if (old_iomap == NULL && new_iomap == NULL)
347 goto out2; 350 goto out2;
351
352 /*
353 * Move nonexistent data. NOP.
354 */
348 if (old_iomap == NULL && ttm == NULL) 355 if (old_iomap == NULL && ttm == NULL)
349 goto out2; 356 goto out2;
350 357
351 if (ttm->state == tt_unpopulated) { 358 /*
359 * TTM might be null for moves within the same region.
360 */
361 if (ttm && ttm->state == tt_unpopulated) {
352 ret = ttm->bdev->driver->ttm_tt_populate(ttm); 362 ret = ttm->bdev->driver->ttm_tt_populate(ttm);
353 if (ret) { 363 if (ret)
354 /* if we fail here don't nuke the mm node
355 * as the bo still owns it */
356 old_copy.mm_node = NULL;
357 goto out1; 364 goto out1;
358 }
359 } 365 }
360 366
361 add = 0; 367 add = 0;
@@ -381,11 +387,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
381 prot); 387 prot);
382 } else 388 } else
383 ret = ttm_copy_io_page(new_iomap, old_iomap, page); 389 ret = ttm_copy_io_page(new_iomap, old_iomap, page);
384 if (ret) { 390 if (ret)
385 /* failing here, means keep old copy as-is */
386 old_copy.mm_node = NULL;
387 goto out1; 391 goto out1;
388 }
389 } 392 }
390 mb(); 393 mb();
391out2: 394out2:
@@ -403,7 +406,12 @@ out1:
403 ttm_mem_reg_iounmap(bdev, old_mem, new_iomap); 406 ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
404out: 407out:
405 ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); 408 ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
406 ttm_bo_mem_put(bo, &old_copy); 409
410 /*
411 * On error, keep the mm node!
412 */
413 if (!ret)
414 ttm_bo_mem_put(bo, &old_copy);
407 return ret; 415 return ret;
408} 416}
409EXPORT_SYMBOL(ttm_bo_move_memcpy); 417EXPORT_SYMBOL(ttm_bo_move_memcpy);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1006c15445e9..ac617f3ecd0c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -41,6 +41,51 @@
41 41
42#define TTM_BO_VM_NUM_PREFAULT 16 42#define TTM_BO_VM_NUM_PREFAULT 16
43 43
44static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
45 struct vm_area_struct *vma,
46 struct vm_fault *vmf)
47{
48 struct ttm_bo_device *bdev = bo->bdev;
49 int ret = 0;
50
51 spin_lock(&bdev->fence_lock);
52 if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
53 goto out_unlock;
54
55 /*
56 * Quick non-stalling check for idle.
57 */
58 ret = ttm_bo_wait(bo, false, false, true);
59 if (likely(ret == 0))
60 goto out_unlock;
61
62 /*
63 * If possible, avoid waiting for GPU with mmap_sem
64 * held.
65 */
66 if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
67 ret = VM_FAULT_RETRY;
68 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
69 goto out_unlock;
70
71 up_read(&vma->vm_mm->mmap_sem);
72 (void) ttm_bo_wait(bo, false, true, false);
73 goto out_unlock;
74 }
75
76 /*
77 * Ordinary wait.
78 */
79 ret = ttm_bo_wait(bo, false, true, false);
80 if (unlikely(ret != 0))
81 ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
82 VM_FAULT_NOPAGE;
83
84out_unlock:
85 spin_unlock(&bdev->fence_lock);
86 return ret;
87}
88
44static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 89static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
45{ 90{
46 struct ttm_buffer_object *bo = (struct ttm_buffer_object *) 91 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -57,6 +102,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
57 int retval = VM_FAULT_NOPAGE; 102 int retval = VM_FAULT_NOPAGE;
58 struct ttm_mem_type_manager *man = 103 struct ttm_mem_type_manager *man =
59 &bdev->man[bo->mem.mem_type]; 104 &bdev->man[bo->mem.mem_type];
105 struct vm_area_struct cvma;
60 106
61 /* 107 /*
62 * Work around locking order reversal in fault / nopfn 108 * Work around locking order reversal in fault / nopfn
@@ -91,18 +137,11 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
91 * Wait for buffer data in transit, due to a pipelined 137 * Wait for buffer data in transit, due to a pipelined
92 * move. 138 * move.
93 */ 139 */
94 140 ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
95 spin_lock(&bdev->fence_lock); 141 if (unlikely(ret != 0)) {
96 if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) { 142 retval = ret;
97 ret = ttm_bo_wait(bo, false, true, false); 143 goto out_unlock;
98 spin_unlock(&bdev->fence_lock); 144 }
99 if (unlikely(ret != 0)) {
100 retval = (ret != -ERESTARTSYS) ?
101 VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
102 goto out_unlock;
103 }
104 } else
105 spin_unlock(&bdev->fence_lock);
106 145
107 ret = ttm_mem_io_lock(man, true); 146 ret = ttm_mem_io_lock(man, true);
108 if (unlikely(ret != 0)) { 147 if (unlikely(ret != 0)) {
@@ -126,26 +165,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
126 } 165 }
127 166
128 /* 167 /*
129 * Strictly, we're not allowed to modify vma->vm_page_prot here, 168 * Make a local vma copy to modify the page_prot member
130 * since the mmap_sem is only held in read mode. However, we 169 * and vm_flags if necessary. The vma parameter is protected
131 * modify only the caching bits of vma->vm_page_prot and 170 * by mmap_sem in write mode.
132 * consider those bits protected by
133 * the bo->mutex, as we should be the only writers.
134 * There shouldn't really be any readers of these bits except
135 * within vm_insert_mixed()? fork?
136 *
137 * TODO: Add a list of vmas to the bo, and change the
138 * vma->vm_page_prot when the object changes caching policy, with
139 * the correct locks held.
140 */ 171 */
172 cvma = *vma;
173 cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
174
141 if (bo->mem.bus.is_iomem) { 175 if (bo->mem.bus.is_iomem) {
142 vma->vm_page_prot = ttm_io_prot(bo->mem.placement, 176 cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
143 vma->vm_page_prot); 177 cvma.vm_page_prot);
144 } else { 178 } else {
145 ttm = bo->ttm; 179 ttm = bo->ttm;
146 vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ? 180 if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
147 vm_get_page_prot(vma->vm_flags) : 181 cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
148 ttm_io_prot(bo->mem.placement, vma->vm_page_prot); 182 cvma.vm_page_prot);
149 183
150 /* Allocate all page at once, most common usage */ 184 /* Allocate all page at once, most common usage */
151 if (ttm->bdev->driver->ttm_tt_populate(ttm)) { 185 if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
@@ -172,7 +206,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
172 pfn = page_to_pfn(page); 206 pfn = page_to_pfn(page);
173 } 207 }
174 208
175 ret = vm_insert_mixed(vma, address, pfn); 209 ret = vm_insert_mixed(&cvma, address, pfn);
176 /* 210 /*
177 * Somebody beat us to this PTE or prefaulting to 211 * Somebody beat us to this PTE or prefaulting to
178 * an already populated PTE, or prefaulting error. 212 * an already populated PTE, or prefaulting error.
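
The new ttm_bo_vm_fault_idle() encodes a three-step policy: a non-blocking idle check first; if the core allows it, drop mmap_sem before any real wait and ask for a retry; otherwise wait while holding the semaphore. A runnable decision-table model of that policy (the flag bit values below are stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define FAULT_FLAG_ALLOW_RETRY  (1 << 0)
#define FAULT_FLAG_RETRY_NOWAIT (1 << 1)

enum action { DONE, RETRY_NO_WAIT, RETRY_AFTER_WAIT, BLOCKING_WAIT };

/* Mirrors the control flow of ttm_bo_vm_fault_idle(): moving/idle stand
 * in for the TTM_BO_PRIV_FLAG_MOVING test and the no-wait ttm_bo_wait(). */
static enum action fault_idle_policy(bool moving, bool already_idle,
				     unsigned flags)
{
	if (!moving || already_idle)
		return DONE;		/* quick non-stalling check passed */

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return RETRY_NO_WAIT;	/* caller retries later */
		return RETRY_AFTER_WAIT; /* drop mmap_sem, wait, then retry */
	}

	return BLOCKING_WAIT;	/* ordinary wait with mmap_sem held */
}

int main(void)
{
	printf("%d\n", fault_idle_policy(true, false, FAULT_FLAG_ALLOW_RETRY));
	return 0;
}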
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7957beeeaf73..fb8259f69839 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -33,6 +33,7 @@
33 * when freed). 33 * when freed).
34 */ 34 */
35 35
36#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
36#define pr_fmt(fmt) "[TTM] " fmt 37#define pr_fmt(fmt) "[TTM] " fmt
37 38
38#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
@@ -1142,3 +1143,5 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
1142 return 0; 1143 return 0;
1143} 1144}
1144EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs); 1145EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
1146
1147#endif
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 6222af19f456..f02528686cd5 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -8,6 +8,7 @@ config DRM_UDL
8 select FB_SYS_IMAGEBLIT 8 select FB_SYS_IMAGEBLIT
9 select FB_DEFERRED_IO 9 select FB_DEFERRED_IO
10 select DRM_KMS_HELPER 10 select DRM_KMS_HELPER
11 select DRM_KMS_FB_HELPER
11 help 12 help
12 This is a KMS driver for the USB displaylink video adapters. 13 This is a KMS driver for the USB displaylink video adapters.
13 Say M/Y to add support for these devices via drm/kms interfaces. 14 Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 7650dc0d78ce..3ddd6cd98ac1 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -77,7 +77,6 @@ static struct drm_driver driver = {
77 .unload = udl_driver_unload, 77 .unload = udl_driver_unload,
78 78
79 /* gem hooks */ 79 /* gem hooks */
80 .gem_init_object = udl_gem_init_object,
81 .gem_free_object = udl_gem_free_object, 80 .gem_free_object = udl_gem_free_object,
82 .gem_vm_ops = &udl_gem_vm_ops, 81 .gem_vm_ops = &udl_gem_vm_ops,
83 82
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 56aec9409fa3..1fbf7b357f16 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -115,7 +115,6 @@ int udl_dumb_create(struct drm_file *file_priv,
115int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev, 115int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
116 uint32_t handle, uint64_t *offset); 116 uint32_t handle, uint64_t *offset);
117 117
118int udl_gem_init_object(struct drm_gem_object *obj);
119void udl_gem_free_object(struct drm_gem_object *gem_obj); 118void udl_gem_free_object(struct drm_gem_object *gem_obj);
120struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev, 119struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
121 size_t size); 120 size_t size);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8bf646183bac..24ffbe990736 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,13 +107,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
107 } 107 }
108} 108}
109 109
110int udl_gem_init_object(struct drm_gem_object *obj)
111{
112 BUG();
113
114 return 0;
115}
116
117static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask) 110static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
118{ 111{
119 struct page **pages; 112 struct page **pages;
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 7e3ad87c366c..927889105483 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -79,7 +79,7 @@ int via_final_context(struct drm_device *dev, int context)
79 79
80 /* Linux specific until context tracking code gets ported to BSD */ 80 /* Linux specific until context tracking code gets ported to BSD */
81 /* Last context, perform cleanup */ 81 /* Last context, perform cleanup */
82 if (dev->ctx_count == 1 && dev->dev_private) { 82 if (list_is_singular(&dev->ctxlist) && dev->dev_private) {
83 DRM_DEBUG("Last Context\n"); 83 DRM_DEBUG("Last Context\n");
84 drm_irq_uninstall(dev); 84 drm_irq_uninstall(dev);
85 via_cleanup_futex(dev_priv); 85 via_cleanup_futex(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 96dc84dc34d0..7776e6f0aef6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -141,37 +141,374 @@ struct ttm_placement vmw_srf_placement = {
141}; 141};
142 142
143struct vmw_ttm_tt { 143struct vmw_ttm_tt {
144 struct ttm_tt ttm; 144 struct ttm_dma_tt dma_ttm;
145 struct vmw_private *dev_priv; 145 struct vmw_private *dev_priv;
146 int gmr_id; 146 int gmr_id;
147 struct sg_table sgt;
148 struct vmw_sg_table vsgt;
149 uint64_t sg_alloc_size;
150 bool mapped;
147}; 151};
148 152
153/**
154 * Helper functions to advance a struct vmw_piter iterator.
155 *
156 * @viter: Pointer to the iterator.
157 *
158 * These functions return false if past the end of the list,
159 * true otherwise. Functions are selected depending on the current
160 * DMA mapping mode.
161 */
162static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
163{
164 return ++(viter->i) < viter->num_pages;
165}
166
167static bool __vmw_piter_sg_next(struct vmw_piter *viter)
168{
169 return __sg_page_iter_next(&viter->iter);
170}
171
172
173/**
174 * Helper functions to return a pointer to the current page.
175 *
176 * @viter: Pointer to the iterator
177 *
178 * These functions return a pointer to the page currently
179 * pointed to by @viter. Functions are selected depending on the
180 * current mapping mode.
181 */
182static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
183{
184 return viter->pages[viter->i];
185}
186
187static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
188{
189 return sg_page_iter_page(&viter->iter);
190}
191
192
193/**
194 * Helper functions to return the DMA address of the current page.
195 *
196 * @viter: Pointer to the iterator
197 *
198 * These functions return the DMA address of the page currently
199 * pointed to by @viter. Functions are selected depending on the
200 * current mapping mode.
201 */
202static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
203{
204 return page_to_phys(viter->pages[viter->i]);
205}
206
207static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
208{
209 return viter->addrs[viter->i];
210}
211
212static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
213{
214 return sg_page_iter_dma_address(&viter->iter);
215}
216
217
218/**
219 * vmw_piter_start - Initialize a struct vmw_piter.
220 *
221 * @viter: Pointer to the iterator to initialize
222 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
223 *
224 * Note that we're following the convention of __sg_page_iter_start, so that
225 * the iterator doesn't point to a valid page after initialization; it has
226 * to be advanced one step first.
227 */
228void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
229 unsigned long p_offset)
230{
231 viter->i = p_offset - 1;
232 viter->num_pages = vsgt->num_pages;
233 switch (vsgt->mode) {
234 case vmw_dma_phys:
235 viter->next = &__vmw_piter_non_sg_next;
236 viter->dma_address = &__vmw_piter_phys_addr;
237 viter->page = &__vmw_piter_non_sg_page;
238 viter->pages = vsgt->pages;
239 break;
240 case vmw_dma_alloc_coherent:
241 viter->next = &__vmw_piter_non_sg_next;
242 viter->dma_address = &__vmw_piter_dma_addr;
243 viter->page = &__vmw_piter_non_sg_page;
244 viter->addrs = vsgt->addrs;
245 break;
246 case vmw_dma_map_populate:
247 case vmw_dma_map_bind:
248 viter->next = &__vmw_piter_sg_next;
249 viter->dma_address = &__vmw_piter_sg_addr;
250 viter->page = &__vmw_piter_sg_page;
251 __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
252 vsgt->sgt->orig_nents, p_offset);
253 break;
254 default:
255 BUG();
256 }
257}
258
259/**
260 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for

261 * TTM pages
262 *
263 * @vmw_tt: Pointer to a struct vmw_ttm_backend
264 *
265 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
266 */
267static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
268{
269 struct device *dev = vmw_tt->dev_priv->dev->dev;
270
271 dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
272 DMA_BIDIRECTIONAL);
273 vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
274}
275
276/**
277 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
278 *
279 * @vmw_tt: Pointer to a struct vmw_ttm_backend
280 *
281 * This function is used to get device addresses from the kernel DMA layer.
282 * However, it's violating the DMA API in that when this operation has been
283 * performed, it's illegal for the CPU to write to the pages without first
284 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
285 * therefore only legal to call this function if we know that the function
286 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
287 * a CPU write buffer flush.
288 */
289static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
290{
291 struct device *dev = vmw_tt->dev_priv->dev->dev;
292 int ret;
293
294 ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
295 DMA_BIDIRECTIONAL);
296 if (unlikely(ret == 0))
297 return -ENOMEM;
298
299 vmw_tt->sgt.nents = ret;
300
301 return 0;
302}
303
304/**
305 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
306 *
307 * @vmw_tt: Pointer to a struct vmw_ttm_tt
308 *
309 * Select the correct function for and make sure the TTM pages are
310 * visible to the device. Allocate storage for the device mappings.
311 * If a mapping has already been performed, indicated by the storage
312 * pointer being non NULL, the function returns success.
313 */
314static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
315{
316 struct vmw_private *dev_priv = vmw_tt->dev_priv;
317 struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
318 struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
319 struct vmw_piter iter;
320 dma_addr_t old;
321 int ret = 0;
322 static size_t sgl_size;
323 static size_t sgt_size;
324
325 if (vmw_tt->mapped)
326 return 0;
327
328 vsgt->mode = dev_priv->map_mode;
329 vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
330 vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
331 vsgt->addrs = vmw_tt->dma_ttm.dma_address;
332 vsgt->sgt = &vmw_tt->sgt;
333
334 switch (dev_priv->map_mode) {
335 case vmw_dma_map_bind:
336 case vmw_dma_map_populate:
337 if (unlikely(!sgl_size)) {
338 sgl_size = ttm_round_pot(sizeof(struct scatterlist));
339 sgt_size = ttm_round_pot(sizeof(struct sg_table));
340 }
341 vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
342 ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
343 true);
344 if (unlikely(ret != 0))
345 return ret;
346
347 ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
348 vsgt->num_pages, 0,
349 (unsigned long)
350 vsgt->num_pages << PAGE_SHIFT,
351 GFP_KERNEL);
352 if (unlikely(ret != 0))
353 goto out_sg_alloc_fail;
354
355 if (vsgt->num_pages > vmw_tt->sgt.nents) {
356 uint64_t over_alloc =
357 sgl_size * (vsgt->num_pages -
358 vmw_tt->sgt.nents);
359
360 ttm_mem_global_free(glob, over_alloc);
361 vmw_tt->sg_alloc_size -= over_alloc;
362 }
363
364 ret = vmw_ttm_map_for_dma(vmw_tt);
365 if (unlikely(ret != 0))
366 goto out_map_fail;
367
368 break;
369 default:
370 break;
371 }
372
373 old = ~((dma_addr_t) 0);
374 vmw_tt->vsgt.num_regions = 0;
375 for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
376 dma_addr_t cur = vmw_piter_dma_addr(&iter);
377
378 if (cur != old + PAGE_SIZE)
379 vmw_tt->vsgt.num_regions++;
380 old = cur;
381 }
382
383 vmw_tt->mapped = true;
384 return 0;
385
386out_map_fail:
387 sg_free_table(vmw_tt->vsgt.sgt);
388 vmw_tt->vsgt.sgt = NULL;
389out_sg_alloc_fail:
390 ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
391 return ret;
392}
393
394/**
395 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
396 *
397 * @vmw_tt: Pointer to a struct vmw_ttm_tt
398 *
399 * Tear down any previously set up device DMA mappings and free
400 * any storage space allocated for them. If there are no mappings set up,
401 * this function is a NOP.
402 */
403static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
404{
405 struct vmw_private *dev_priv = vmw_tt->dev_priv;
406
407 if (!vmw_tt->vsgt.sgt)
408 return;
409
410 switch (dev_priv->map_mode) {
411 case vmw_dma_map_bind:
412 case vmw_dma_map_populate:
413 vmw_ttm_unmap_from_dma(vmw_tt);
414 sg_free_table(vmw_tt->vsgt.sgt);
415 vmw_tt->vsgt.sgt = NULL;
416 ttm_mem_global_free(vmw_mem_glob(dev_priv),
417 vmw_tt->sg_alloc_size);
418 break;
419 default:
420 break;
421 }
422 vmw_tt->mapped = false;
423}
424
149static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) 425static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
150{ 426{
151 struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm); 427 struct vmw_ttm_tt *vmw_be =
428 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
429 int ret;
430
431 ret = vmw_ttm_map_dma(vmw_be);
432 if (unlikely(ret != 0))
433 return ret;
152 434
153 vmw_be->gmr_id = bo_mem->start; 435 vmw_be->gmr_id = bo_mem->start;
154 436
155 return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages, 437 return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
156 ttm->num_pages, vmw_be->gmr_id); 438 ttm->num_pages, vmw_be->gmr_id);
157} 439}
158 440
159static int vmw_ttm_unbind(struct ttm_tt *ttm) 441static int vmw_ttm_unbind(struct ttm_tt *ttm)
160{ 442{
161 struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm); 443 struct vmw_ttm_tt *vmw_be =
444 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
162 445
163 vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); 446 vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
447
448 if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
449 vmw_ttm_unmap_dma(vmw_be);
450
164 return 0; 451 return 0;
165} 452}
166 453
167static void vmw_ttm_destroy(struct ttm_tt *ttm) 454static void vmw_ttm_destroy(struct ttm_tt *ttm)
168{ 455{
169 struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm); 456 struct vmw_ttm_tt *vmw_be =
170 457 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
171 ttm_tt_fini(ttm); 458
459 vmw_ttm_unmap_dma(vmw_be);
460 if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
461 ttm_dma_tt_fini(&vmw_be->dma_ttm);
462 else
463 ttm_tt_fini(ttm);
172 kfree(vmw_be); 464 kfree(vmw_be);
173} 465}
174 466
467static int vmw_ttm_populate(struct ttm_tt *ttm)
468{
469 struct vmw_ttm_tt *vmw_tt =
470 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
471 struct vmw_private *dev_priv = vmw_tt->dev_priv;
472 struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
473 int ret;
474
475 if (ttm->state != tt_unpopulated)
476 return 0;
477
478 if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
479 size_t size =
480 ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
481 ret = ttm_mem_global_alloc(glob, size, false, true);
482 if (unlikely(ret != 0))
483 return ret;
484
485 ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
486 if (unlikely(ret != 0))
487 ttm_mem_global_free(glob, size);
488 } else
489 ret = ttm_pool_populate(ttm);
490
491 return ret;
492}
493
494static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
495{
496 struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
497 dma_ttm.ttm);
498 struct vmw_private *dev_priv = vmw_tt->dev_priv;
499 struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
500
501 vmw_ttm_unmap_dma(vmw_tt);
502 if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
503 size_t size =
504 ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
505
506 ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
507 ttm_mem_global_free(glob, size);
508 } else
509 ttm_pool_unpopulate(ttm);
510}
511
175static struct ttm_backend_func vmw_ttm_func = { 512static struct ttm_backend_func vmw_ttm_func = {
176 .bind = vmw_ttm_bind, 513 .bind = vmw_ttm_bind,
177 .unbind = vmw_ttm_unbind, 514 .unbind = vmw_ttm_unbind,
@@ -183,20 +520,28 @@ struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
183 struct page *dummy_read_page) 520 struct page *dummy_read_page)
184{ 521{
185 struct vmw_ttm_tt *vmw_be; 522 struct vmw_ttm_tt *vmw_be;
523 int ret;
186 524
187 vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL); 525 vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
188 if (!vmw_be) 526 if (!vmw_be)
189 return NULL; 527 return NULL;
190 528
191 vmw_be->ttm.func = &vmw_ttm_func; 529 vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
192 vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); 530 vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
193 531
194 if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) { 532 if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
195 kfree(vmw_be); 533 ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
196 return NULL; 534 dummy_read_page);
197 } 535 else
198 536 ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
199 return &vmw_be->ttm; 537 dummy_read_page);
538 if (unlikely(ret != 0))
539 goto out_no_init;
540
541 return &vmw_be->dma_ttm.ttm;
542out_no_init:
543 kfree(vmw_be);
544 return NULL;
200} 545}
201 546
202int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) 547int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -332,8 +677,8 @@ static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
332 677
333struct ttm_bo_driver vmw_bo_driver = { 678struct ttm_bo_driver vmw_bo_driver = {
334 .ttm_tt_create = &vmw_ttm_tt_create, 679 .ttm_tt_create = &vmw_ttm_tt_create,
335 .ttm_tt_populate = &ttm_pool_populate, 680 .ttm_tt_populate = &vmw_ttm_populate,
336 .ttm_tt_unpopulate = &ttm_pool_unpopulate, 681 .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
337 .invalidate_caches = vmw_invalidate_caches, 682 .invalidate_caches = vmw_invalidate_caches,
338 .init_mem_type = vmw_init_mem_type, 683 .init_mem_type = vmw_init_mem_type,
339 .evict_flags = vmw_evict_flags, 684 .evict_flags = vmw_evict_flags,
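
The num_regions bookkeeping in vmw_ttm_map_dma() counts runs of pages whose device addresses are contiguous, which later bounds how many GMR descriptors a binding needs. A self-contained model of that loop (the addresses are made up; dma_addr_t is modeled as uint64_t):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL

/* Count device-contiguous runs, exactly as the loop in
 * vmw_ttm_map_dma() does with the ~0 sentinel for "no previous page". */
static unsigned long count_regions(const uint64_t *addrs, unsigned long n)
{
	uint64_t old = ~0ULL;
	unsigned long i, regions = 0;

	for (i = 0; i < n; i++) {
		if (addrs[i] != old + PAGE_SIZE)
			regions++;
		old = addrs[i];
	}
	return regions;
}

int main(void)
{
	const uint64_t addrs[] = {
		0x10000, 0x11000, 0x12000,	/* one contiguous run */
		0x40000, 0x41000		/* a second run */
	};

	printf("%lu regions\n", count_regions(addrs, 5)); /* prints 2 */
	return 0;
}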
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0508f93b9795..20d5485eaf98 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -32,6 +32,7 @@
32#include <drm/ttm/ttm_bo_driver.h> 32#include <drm/ttm/ttm_bo_driver.h>
33#include <drm/ttm/ttm_object.h> 33#include <drm/ttm/ttm_object.h>
34#include <drm/ttm/ttm_module.h> 34#include <drm/ttm/ttm_module.h>
35#include <linux/dma_remapping.h>
35 36
36#define VMWGFX_DRIVER_NAME "vmwgfx" 37#define VMWGFX_DRIVER_NAME "vmwgfx"
37#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" 38#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
@@ -185,6 +186,9 @@ static struct pci_device_id vmw_pci_id_list[] = {
185MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); 186MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
186 187
187static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); 188static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
189static int vmw_force_iommu;
190static int vmw_restrict_iommu;
191static int vmw_force_coherent;
188 192
189static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 193static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
190static void vmw_master_init(struct vmw_master *); 194static void vmw_master_init(struct vmw_master *);
@@ -193,6 +197,13 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
193 197
194MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); 198MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
195module_param_named(enable_fbdev, enable_fbdev, int, 0600); 199module_param_named(enable_fbdev, enable_fbdev, int, 0600);
200MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
201module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
202MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
203module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
204MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
205module_param_named(force_coherent, vmw_force_coherent, int, 0600);
206
196 207
197static void vmw_print_capabilities(uint32_t capabilities) 208static void vmw_print_capabilities(uint32_t capabilities)
198{ 209{
@@ -427,12 +438,85 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
427 dev_priv->initial_height = height; 438 dev_priv->initial_height = height;
428} 439}
429 440
441/**
442 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
443 * system.
444 *
445 * @dev_priv: Pointer to a struct vmw_private
446 *
447 * This function tries to determine the IOMMU setup and what actions
448 * need to be taken by the driver to make system pages visible to the
449 * device.
450 * If this function decides that DMA is not possible, it returns -EINVAL.
451 * The driver may then try to disable features of the device that require
452 * DMA.
453 */
454static int vmw_dma_select_mode(struct vmw_private *dev_priv)
455{
456 static const char *names[vmw_dma_map_max] = {
457 [vmw_dma_phys] = "Using physical TTM page addresses.",
458 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
459 [vmw_dma_map_populate] = "Keeping DMA mappings.",
460 [vmw_dma_map_bind] = "Giving up DMA mappings early."};
461#ifdef CONFIG_X86
462 const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
463
464#ifdef CONFIG_INTEL_IOMMU
465 if (intel_iommu_enabled) {
466 dev_priv->map_mode = vmw_dma_map_populate;
467 goto out_fixup;
468 }
469#endif
470
471 if (!(vmw_force_iommu || vmw_force_coherent)) {
472 dev_priv->map_mode = vmw_dma_phys;
473 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
474 return 0;
475 }
476
477 dev_priv->map_mode = vmw_dma_map_populate;
478
479 if (dma_ops->sync_single_for_cpu)
480 dev_priv->map_mode = vmw_dma_alloc_coherent;
481#ifdef CONFIG_SWIOTLB
482 if (swiotlb_nr_tbl() == 0)
483 dev_priv->map_mode = vmw_dma_map_populate;
484#endif
485
486#ifdef CONFIG_INTEL_IOMMU
487out_fixup:
488#endif
489 if (dev_priv->map_mode == vmw_dma_map_populate &&
490 vmw_restrict_iommu)
491 dev_priv->map_mode = vmw_dma_map_bind;
492
493 if (vmw_force_coherent)
494 dev_priv->map_mode = vmw_dma_alloc_coherent;
495
496#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
497 /*
498 * No coherent page pool
499 */
500 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
501 return -EINVAL;
502#endif
503
504#else /* CONFIG_X86 */
505 dev_priv->map_mode = vmw_dma_map_populate;
506#endif /* CONFIG_X86 */
507
508 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
509
510 return 0;
511}
512
430static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 513static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
431{ 514{
432 struct vmw_private *dev_priv; 515 struct vmw_private *dev_priv;
433 int ret; 516 int ret;
434 uint32_t svga_id; 517 uint32_t svga_id;
435 enum vmw_res_type i; 518 enum vmw_res_type i;
519 bool refuse_dma = false;
436 520
437 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 521 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
438 if (unlikely(dev_priv == NULL)) { 522 if (unlikely(dev_priv == NULL)) {
@@ -481,6 +565,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
481 } 565 }
482 566
483 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); 567 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
568 ret = vmw_dma_select_mode(dev_priv);
569 if (unlikely(ret != 0)) {
570 DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
571 refuse_dma = true;
572 }
484 573
485 dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); 574 dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
486 dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); 575 dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
@@ -558,8 +647,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
558 } 647 }
559 648
560 dev_priv->has_gmr = true; 649 dev_priv->has_gmr = true;
561 if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, 650 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
562 dev_priv->max_gmr_ids) != 0) { 651 refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
652 dev_priv->max_gmr_ids) != 0) {
563 DRM_INFO("No GMR memory available. " 653 DRM_INFO("No GMR memory available. "
564 "Graphics memory resources are very limited.\n"); 654 "Graphics memory resources are very limited.\n");
565 dev_priv->has_gmr = false; 655 dev_priv->has_gmr = false;
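
The precedence in vmw_dma_select_mode() is easy to lose in the #ifdef maze: an active Intel IOMMU forces map_populate; otherwise, without any forcing module parameter, raw physical addresses are used; restrict_iommu can then downgrade map_populate to map_bind, and force_coherent wins over everything. A condensed, runnable model of the x86 path (the boolean inputs stand in for intel_iommu_enabled, the module parameters, and the coherent-ops/swiotlb probes):

#include <stdbool.h>
#include <stdio.h>

enum mode { DMA_PHYS, DMA_ALLOC_COHERENT, DMA_MAP_POPULATE, DMA_MAP_BIND };

static enum mode select_mode(bool intel_iommu, bool force_iommu,
			     bool force_coherent, bool restrict_iommu,
			     bool coherent_ops, bool swiotlb_off)
{
	enum mode mode;

	if (intel_iommu) {
		mode = DMA_MAP_POPULATE;
	} else if (!(force_iommu || force_coherent)) {
		return DMA_PHYS;	/* default: physical page addresses */
	} else {
		mode = DMA_MAP_POPULATE;
		if (coherent_ops)
			mode = DMA_ALLOC_COHERENT;
		if (swiotlb_off)
			mode = DMA_MAP_POPULATE;
	}

	/* The "out_fixup" tail of the real function. */
	if (mode == DMA_MAP_POPULATE && restrict_iommu)
		mode = DMA_MAP_BIND;	/* give up DMA mappings early */
	if (force_coherent)
		mode = DMA_ALLOC_COHERENT;
	return mode;
}

int main(void)
{
	/* Intel IOMMU on, restrict_iommu=1 -> map_bind (prints 3). */
	printf("%d\n", select_mode(true, false, false, true, false, false));
	return 0;
}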
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 150ec64af617..e401d5dbcb96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -177,6 +177,58 @@ struct vmw_res_cache_entry {
177 struct vmw_resource_val_node *node; 177 struct vmw_resource_val_node *node;
178}; 178};
179 179
180/**
181 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
182 */
183enum vmw_dma_map_mode {
184 vmw_dma_phys, /* Use physical page addresses */
185 vmw_dma_alloc_coherent, /* Use TTM coherent pages */
186 vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */
187 vmw_dma_map_bind, /* Unmap from DMA just before unbind */
188 vmw_dma_map_max
189};
190
191/**
192 * struct vmw_sg_table - Scatter/gather table for binding, with additional
193 * device-specific information.
194 *
195 * @sgt: Pointer to a struct sg_table with binding information
196 * @num_regions: Number of regions with device-address contiguous pages
197 */
198struct vmw_sg_table {
199 enum vmw_dma_map_mode mode;
200 struct page **pages;
201 const dma_addr_t *addrs;
202 struct sg_table *sgt;
203 unsigned long num_regions;
204 unsigned long num_pages;
205};
206
207/**
208 * struct vmw_piter - Page iterator that iterates over a list of pages
209 * and DMA addresses that could be either a scatter-gather list or
210 * arrays
211 *
212 * @pages: Array of page pointers to the pages.
213 * @addrs: DMA addresses to the pages if coherent pages are used.
214 * @iter: Scatter-gather page iterator. Current position in SG list.
215 * @i: Current position in arrays.
216 * @num_pages: Number of pages total.
217 * @next: Function to advance the iterator. Returns false if past the list
218 * of pages, true otherwise.
219 * @dma_address: Function to return the DMA address of the current page.
220 */
221struct vmw_piter {
222 struct page **pages;
223 const dma_addr_t *addrs;
224 struct sg_page_iter iter;
225 unsigned long i;
226 unsigned long num_pages;
227 bool (*next)(struct vmw_piter *);
228 dma_addr_t (*dma_address)(struct vmw_piter *);
229 struct page *(*page)(struct vmw_piter *);
230};
231
180struct vmw_sw_context{ 232struct vmw_sw_context{
181 struct drm_open_hash res_ht; 233 struct drm_open_hash res_ht;
182 bool res_ht_initialized; 234 bool res_ht_initialized;
@@ -358,6 +410,11 @@ struct vmw_private {
358 410
359 struct list_head res_lru[vmw_res_max]; 411 struct list_head res_lru[vmw_res_max];
360 uint32_t used_memory_size; 412 uint32_t used_memory_size;
413
414 /*
415 * DMA mapping stuff.
416 */
417 enum vmw_dma_map_mode map_mode;
361}; 418};
362 419
363static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) 420static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -405,7 +462,7 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
405 */ 462 */
406 463
407extern int vmw_gmr_bind(struct vmw_private *dev_priv, 464extern int vmw_gmr_bind(struct vmw_private *dev_priv,
408 struct page *pages[], 465 const struct vmw_sg_table *vsgt,
409 unsigned long num_pages, 466 unsigned long num_pages,
410 int gmr_id); 467 int gmr_id);
411extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); 468extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
@@ -568,6 +625,45 @@ extern struct ttm_placement vmw_evictable_placement;
568extern struct ttm_placement vmw_srf_placement; 625extern struct ttm_placement vmw_srf_placement;
569extern struct ttm_bo_driver vmw_bo_driver; 626extern struct ttm_bo_driver vmw_bo_driver;
570extern int vmw_dma_quiescent(struct drm_device *dev); 627extern int vmw_dma_quiescent(struct drm_device *dev);
628extern void vmw_piter_start(struct vmw_piter *viter,
629 const struct vmw_sg_table *vsgt,
630 unsigned long p_offs);
631
632/**
633 * vmw_piter_next - Advance the iterator one page.
634 *
635 * @viter: Pointer to the iterator to advance.
636 *
637 * Returns false if past the list of pages, true otherwise.
638 */
639static inline bool vmw_piter_next(struct vmw_piter *viter)
640{
641 return viter->next(viter);
642}
643
644/**
645 * vmw_piter_dma_addr - Return the DMA address of the current page.
646 *
647 * @viter: Pointer to the iterator
648 *
649 * Returns the DMA address of the page pointed to by @viter.
650 */
651static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
652{
653 return viter->dma_address(viter);
654}
655
656/**
657 * vmw_piter_page - Return a pointer to the current page.
658 *
659 * @viter: Pointer to the iterator
660 *
661 * Returns a pointer to the page pointed to by @viter.
662 */
663static inline struct page *vmw_piter_page(struct vmw_piter *viter)
664{
665 return viter->page(viter);
666}
571 667
572/** 668/**
573 * Command submission - vmwgfx_execbuf.c 669 * Command submission - vmwgfx_execbuf.c
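
The iterator deliberately starts one step before the first page (the __sg_page_iter_start convention noted above), so every access must be gated by a call to the next() hook; the function pointers let the same loop walk a page array, a coherent address array, or a scatter-gather list. A userspace model of the convention and the dispatch (array backend only; names and types are simplified stand-ins for the vmw_piter API):

#include <stdio.h>
#include <stdint.h>

struct piter {
	const uint64_t *addrs;
	unsigned long i, num;
	int (*next)(struct piter *);
	uint64_t (*dma_address)(struct piter *);
};

static int array_next(struct piter *p)      { return ++p->i < p->num; }
static uint64_t array_addr(struct piter *p) { return p->addrs[p->i]; }

static void piter_start(struct piter *p, const uint64_t *addrs,
			unsigned long num, unsigned long offset)
{
	p->addrs = addrs;
	p->num = num;
	p->i = offset - 1;	/* one before the first valid page */
	p->next = array_next;
	p->dma_address = array_addr;
}

int main(void)
{
	const uint64_t addrs[] = { 0x1000, 0x2000, 0x3000 };
	struct piter it;

	/* Same loop shape as the num_regions walk in vmw_ttm_map_dma(). */
	for (piter_start(&it, addrs, 3, 0); it.next(&it);)
		printf("dma 0x%llx\n",
		       (unsigned long long)it.dma_address(&it));
	return 0;
}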
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 1a0bf07fe54b..6ef0b035becb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -32,9 +32,11 @@
32#define VMW_PPN_SIZE (sizeof(unsigned long)) 32#define VMW_PPN_SIZE (sizeof(unsigned long))
33/* A future safe maximum remap size. */ 33/* A future safe maximum remap size. */
34#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE) 34#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
35#define DMA_ADDR_INVALID ((dma_addr_t) 0)
36#define DMA_PAGE_INVALID 0UL
35 37
36static int vmw_gmr2_bind(struct vmw_private *dev_priv, 38static int vmw_gmr2_bind(struct vmw_private *dev_priv,
37 struct page *pages[], 39 struct vmw_piter *iter,
38 unsigned long num_pages, 40 unsigned long num_pages,
39 int gmr_id) 41 int gmr_id)
40{ 42{
@@ -81,11 +83,13 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
81 83
82 for (i = 0; i < nr; ++i) { 84 for (i = 0; i < nr; ++i) {
83 if (VMW_PPN_SIZE <= 4) 85 if (VMW_PPN_SIZE <= 4)
84 *cmd = page_to_pfn(*pages++); 86 *cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
85 else 87 else
86 *((uint64_t *)cmd) = page_to_pfn(*pages++); 88 *((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
89 PAGE_SHIFT;
87 90
88 cmd += VMW_PPN_SIZE / sizeof(*cmd); 91 cmd += VMW_PPN_SIZE / sizeof(*cmd);
92 vmw_piter_next(iter);
89 } 93 }
90 94
91 num_pages -= nr; 95 num_pages -= nr;
@@ -120,22 +124,56 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
120 vmw_fifo_commit(dev_priv, define_size); 124 vmw_fifo_commit(dev_priv, define_size);
121} 125}
122 126
127
128static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
129 struct list_head *desc_pages)
130{
131 struct page *page, *next;
132 struct svga_guest_mem_descriptor *page_virtual;
133 unsigned int desc_per_page = PAGE_SIZE /
134 sizeof(struct svga_guest_mem_descriptor) - 1;
135
136 if (list_empty(desc_pages))
137 return;
138
139 list_for_each_entry_safe(page, next, desc_pages, lru) {
140 list_del_init(&page->lru);
141
142 if (likely(desc_dma != DMA_ADDR_INVALID)) {
143 dma_unmap_page(dev, desc_dma, PAGE_SIZE,
144 DMA_TO_DEVICE);
145 }
146
147 page_virtual = kmap_atomic(page);
148 desc_dma = (dma_addr_t)
149 le32_to_cpu(page_virtual[desc_per_page].ppn) <<
150 PAGE_SHIFT;
151 kunmap_atomic(page_virtual);
152
153 __free_page(page);
154 }
155}
156
123/** 157/**
124 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize 158 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
125 * the number of used descriptors. 159 * the number of used descriptors.
160 *
126 */ 161 */
127 162
128static int vmw_gmr_build_descriptors(struct list_head *desc_pages, 163static int vmw_gmr_build_descriptors(struct device *dev,
129 struct page *pages[], 164 struct list_head *desc_pages,
130 unsigned long num_pages) 165 struct vmw_piter *iter,
166 unsigned long num_pages,
167 dma_addr_t *first_dma)
131{ 168{
132 struct page *page, *next; 169 struct page *page;
133 struct svga_guest_mem_descriptor *page_virtual = NULL; 170 struct svga_guest_mem_descriptor *page_virtual = NULL;
134 struct svga_guest_mem_descriptor *desc_virtual = NULL; 171 struct svga_guest_mem_descriptor *desc_virtual = NULL;
135 unsigned int desc_per_page; 172 unsigned int desc_per_page;
136 unsigned long prev_pfn; 173 unsigned long prev_pfn;
137 unsigned long pfn; 174 unsigned long pfn;
138 int ret; 175 int ret;
176 dma_addr_t desc_dma;
139 177
140 desc_per_page = PAGE_SIZE / 178 desc_per_page = PAGE_SIZE /
141 sizeof(struct svga_guest_mem_descriptor) - 1; 179 sizeof(struct svga_guest_mem_descriptor) - 1;
@@ -148,23 +186,12 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
148 } 186 }
149 187
150 list_add_tail(&page->lru, desc_pages); 188 list_add_tail(&page->lru, desc_pages);
151
152 /*
153 * Point previous page terminating descriptor to this
154 * page before unmapping it.
155 */
156
157 if (likely(page_virtual != NULL)) {
158 desc_virtual->ppn = page_to_pfn(page);
159 kunmap_atomic(page_virtual);
160 }
161
162 page_virtual = kmap_atomic(page); 189 page_virtual = kmap_atomic(page);
163 desc_virtual = page_virtual - 1; 190 desc_virtual = page_virtual - 1;
164 prev_pfn = ~(0UL); 191 prev_pfn = ~(0UL);
165 192
166 while (likely(num_pages != 0)) { 193 while (likely(num_pages != 0)) {
167 pfn = page_to_pfn(*pages); 194 pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
168 195
169 if (pfn != prev_pfn + 1) { 196 if (pfn != prev_pfn + 1) {
170 197
@@ -181,104 +208,82 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
181 } 208 }
182 prev_pfn = pfn; 209 prev_pfn = pfn;
183 --num_pages; 210 --num_pages;
184 ++pages; 211 vmw_piter_next(iter);
185 } 212 }
186 213
187 (++desc_virtual)->ppn = cpu_to_le32(0); 214 (++desc_virtual)->ppn = DMA_PAGE_INVALID;
188 desc_virtual->num_pages = cpu_to_le32(0); 215 desc_virtual->num_pages = cpu_to_le32(0);
216 kunmap_atomic(page_virtual);
189 } 217 }
190 218
191 if (likely(page_virtual != NULL)) 219 desc_dma = 0;
220 list_for_each_entry_reverse(page, desc_pages, lru) {
221 page_virtual = kmap_atomic(page);
222 page_virtual[desc_per_page].ppn = cpu_to_le32
223 (desc_dma >> PAGE_SHIFT);
192 kunmap_atomic(page_virtual); 224 kunmap_atomic(page_virtual);
225 desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
226 DMA_TO_DEVICE);
227
228 if (unlikely(dma_mapping_error(dev, desc_dma)))
229 goto out_err;
230 }
231 *first_dma = desc_dma;
193 232
194 return 0; 233 return 0;
195out_err: 234out_err:
196 list_for_each_entry_safe(page, next, desc_pages, lru) { 235 vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
197 list_del_init(&page->lru);
198 __free_page(page);
199 }
200 return ret; 236 return ret;
201} 237}
202 238
203static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
204{
205 struct page *page, *next;
206
207 list_for_each_entry_safe(page, next, desc_pages, lru) {
208 list_del_init(&page->lru);
209 __free_page(page);
210 }
211}
212
213static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv, 239static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
214 int gmr_id, struct list_head *desc_pages) 240 int gmr_id, dma_addr_t desc_dma)
215{ 241{
216 struct page *page;
217
218 if (unlikely(list_empty(desc_pages)))
219 return;
220
221 page = list_entry(desc_pages->next, struct page, lru);
222
223 mutex_lock(&dev_priv->hw_mutex); 242 mutex_lock(&dev_priv->hw_mutex);
224 243
225 vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); 244 vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
226 wmb(); 245 wmb();
227 vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page)); 246 vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
228 mb(); 247 mb();
229 248
230 mutex_unlock(&dev_priv->hw_mutex); 249 mutex_unlock(&dev_priv->hw_mutex);
231 250
232} 251}
233 252
234/**
235 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
236 * the number of used descriptors.
237 */
238
239static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
240 unsigned long num_pages)
241{
242 unsigned long prev_pfn = ~(0UL);
243 unsigned long pfn;
244 unsigned long descriptors = 0;
245
246 while (num_pages--) {
247 pfn = page_to_pfn(*pages++);
248 if (prev_pfn + 1 != pfn)
249 ++descriptors;
250 prev_pfn = pfn;
251 }
252
253 return descriptors;
254}
255
256int vmw_gmr_bind(struct vmw_private *dev_priv, 253int vmw_gmr_bind(struct vmw_private *dev_priv,
257 struct page *pages[], 254 const struct vmw_sg_table *vsgt,
258 unsigned long num_pages, 255 unsigned long num_pages,
259 int gmr_id) 256 int gmr_id)
260{ 257{
261 struct list_head desc_pages; 258 struct list_head desc_pages;
259 dma_addr_t desc_dma = 0;
260 struct device *dev = dev_priv->dev->dev;
261 struct vmw_piter data_iter;
262 int ret; 262 int ret;
263 263
264 vmw_piter_start(&data_iter, vsgt, 0);
265
266 if (unlikely(!vmw_piter_next(&data_iter)))
267 return 0;
268
264 if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) 269 if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
265 return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id); 270 return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
266 271
267 if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR))) 272 if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
268 return -EINVAL; 273 return -EINVAL;
269 274
270 if (vmw_gmr_count_descriptors(pages, num_pages) > 275 if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
271 dev_priv->max_gmr_descriptors)
272 return -EINVAL; 276 return -EINVAL;
273 277
274 INIT_LIST_HEAD(&desc_pages); 278 INIT_LIST_HEAD(&desc_pages);
275 279
276 ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages); 280 ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
281 num_pages, &desc_dma);
277 if (unlikely(ret != 0)) 282 if (unlikely(ret != 0))
278 return ret; 283 return ret;
279 284
280 vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages); 285 vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
281 vmw_gmr_free_descriptors(&desc_pages); 286 vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
282 287
283 return 0; 288 return 0;
284} 289}
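One subtlety in the rewritten legacy path above: vmw_gmr_build_descriptors() now DMA-maps the descriptor pages with list_for_each_entry_reverse(), starting from the tail. Mapping page N+1 first means its DMA address is already known when page N is visited, so it can be written into page N's link slot (index desc_per_page) before page N itself is mapped; desc_dma ends up holding the first page's address for SVGA_REG_GMR_DESCRIPTOR. vmw_gmr_free_descriptors() then walks the chain forward, reading each page's link slot to recover the next DMA handle before unmapping. The link write, isolated (the helper name is hypothetical; the real code inlines this in the reverse loop):

    /* Hedged sketch of the page-chaining step in
     * vmw_gmr_build_descriptors(). */
    static void vmw_desc_page_link(struct svga_guest_mem_descriptor *page_virtual,
                                   unsigned int desc_per_page,
                                   dma_addr_t next_dma)
    {
            /* Slot desc_per_page is reserved as the link; next_dma == 0
             * (the initial value) terminates the chain on the last page. */
            page_virtual[desc_per_page].ppn = cpu_to_le32(next_dma >> PAGE_SHIFT);
    }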
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index c509d40c4897..a51f48e3e917 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -168,7 +168,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
168 fb = drm_framebuffer_lookup(dev, arg->fb_id); 168 fb = drm_framebuffer_lookup(dev, arg->fb_id);
169 if (!fb) { 169 if (!fb) {
170 DRM_ERROR("Invalid framebuffer id.\n"); 170 DRM_ERROR("Invalid framebuffer id.\n");
171 ret = -EINVAL; 171 ret = -ENOENT;
172 goto out_no_fb; 172 goto out_no_fb;
173 } 173 }
174 vfb = vmw_framebuffer_to_vfb(fb); 174 vfb = vmw_framebuffer_to_vfb(fb);
@@ -252,7 +252,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
252 fb = drm_framebuffer_lookup(dev, arg->fb_id); 252 fb = drm_framebuffer_lookup(dev, arg->fb_id);
253 if (!fb) { 253 if (!fb) {
254 DRM_ERROR("Invalid framebuffer id.\n"); 254 DRM_ERROR("Invalid framebuffer id.\n");
255 ret = -EINVAL; 255 ret = -ENOENT;
256 goto out_no_fb; 256 goto out_no_fb;
257 } 257 }
258 258
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index fc43c0601236..ecb3d867b426 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1508,7 +1508,7 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1508 1508
1509 obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC); 1509 obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
1510 if (!obj) { 1510 if (!obj) {
1511 ret = -EINVAL; 1511 ret = -ENOENT;
1512 goto out; 1512 goto out;
1513 } 1513 }
1514 1514
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 37fb4befec82..252501a54def 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -32,6 +32,8 @@
32#include <drm/drmP.h> 32#include <drm/drmP.h>
33#include "vmwgfx_resource_priv.h" 33#include "vmwgfx_resource_priv.h"
34 34
35#define VMW_RES_EVICT_ERR_COUNT 10
36
35struct vmw_user_dma_buffer { 37struct vmw_user_dma_buffer {
36 struct ttm_base_object base; 38 struct ttm_base_object base;
37 struct vmw_dma_buffer dma; 39 struct vmw_dma_buffer dma;
@@ -1091,8 +1093,9 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
1091 * to a backup buffer. 1093 * to a backup buffer.
1092 * 1094 *
1093 * @res: The resource to evict. 1095 * @res: The resource to evict.
1096 * @interruptible: Whether to wait interruptible.
1094 */ 1097 */
1095int vmw_resource_do_evict(struct vmw_resource *res) 1098int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1096{ 1099{
1097 struct ttm_validate_buffer val_buf; 1100 struct ttm_validate_buffer val_buf;
1098 const struct vmw_res_func *func = res->func; 1101 const struct vmw_res_func *func = res->func;
@@ -1102,7 +1105,8 @@ int vmw_resource_do_evict(struct vmw_resource *res)
1102 BUG_ON(!func->may_evict); 1105 BUG_ON(!func->may_evict);
1103 1106
1104 val_buf.bo = NULL; 1107 val_buf.bo = NULL;
1105 ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf); 1108 ret = vmw_resource_check_buffer(res, &ticket, interruptible,
1109 &val_buf);
1106 if (unlikely(ret != 0)) 1110 if (unlikely(ret != 0))
1107 return ret; 1111 return ret;
1108 1112
@@ -1141,6 +1145,7 @@ int vmw_resource_validate(struct vmw_resource *res)
1141 struct vmw_private *dev_priv = res->dev_priv; 1145 struct vmw_private *dev_priv = res->dev_priv;
1142 struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type]; 1146 struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
1143 struct ttm_validate_buffer val_buf; 1147 struct ttm_validate_buffer val_buf;
1148 unsigned err_count = 0;
1144 1149
1145 if (likely(!res->func->may_evict)) 1150 if (likely(!res->func->may_evict))
1146 return 0; 1151 return 0;
@@ -1155,7 +1160,7 @@ int vmw_resource_validate(struct vmw_resource *res)
1155 1160
1156 write_lock(&dev_priv->resource_lock); 1161 write_lock(&dev_priv->resource_lock);
1157 if (list_empty(lru_list) || !res->func->may_evict) { 1162 if (list_empty(lru_list) || !res->func->may_evict) {
 1158 DRM_ERROR("Out of device device id entries " 1163 DRM_ERROR("Out of device resources "
1159 "for %s.\n", res->func->type_name); 1164 "for %s.\n", res->func->type_name);
1160 ret = -EBUSY; 1165 ret = -EBUSY;
1161 write_unlock(&dev_priv->resource_lock); 1166 write_unlock(&dev_priv->resource_lock);
@@ -1168,7 +1173,19 @@ int vmw_resource_validate(struct vmw_resource *res)
1168 list_del_init(&evict_res->lru_head); 1173 list_del_init(&evict_res->lru_head);
1169 1174
1170 write_unlock(&dev_priv->resource_lock); 1175 write_unlock(&dev_priv->resource_lock);
1171 vmw_resource_do_evict(evict_res); 1176
1177 ret = vmw_resource_do_evict(evict_res, true);
1178 if (unlikely(ret != 0)) {
1179 write_lock(&dev_priv->resource_lock);
1180 list_add_tail(&evict_res->lru_head, lru_list);
1181 write_unlock(&dev_priv->resource_lock);
1182 if (ret == -ERESTARTSYS ||
1183 ++err_count > VMW_RES_EVICT_ERR_COUNT) {
1184 vmw_resource_unreference(&evict_res);
1185 goto out_no_validate;
1186 }
1187 }
1188
1172 vmw_resource_unreference(&evict_res); 1189 vmw_resource_unreference(&evict_res);
1173 } while (1); 1190 } while (1);
1174 1191
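The hunk above turns an unconditional eviction into a bounded retry: on failure the resource goes back on the LRU tail, -ERESTARTSYS aborts immediately so signals are honored, and anything else gives up after VMW_RES_EVICT_ERR_COUNT attempts. The loop's shape, reduced to hypothetical helpers:

    /* Hedged sketch of the bounded-retry eviction loop above;
     * pick_lru_victim(), try_evict() and requeue_on_lru() are
     * hypothetical stand-ins. */
    unsigned err_count = 0;
    int err;

    for (;;) {
            struct vmw_resource *victim = pick_lru_victim();

            if (!victim)
                    break;

            err = try_evict(victim);
            if (err) {
                    requeue_on_lru(victim);
                    if (err == -ERESTARTSYS ||
                        ++err_count > VMW_RES_EVICT_ERR_COUNT)
                            break;          /* bound the retries */
            }
    }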
@@ -1253,13 +1270,15 @@ bool vmw_resource_needs_backup(const struct vmw_resource *res)
1253 * @type: The resource type to evict 1270 * @type: The resource type to evict
1254 * 1271 *
1255 * To avoid thrashing starvation or as part of the hibernation sequence, 1272 * To avoid thrashing starvation or as part of the hibernation sequence,
1256 * evict all evictable resources of a specific type. 1273 * try to evict all evictable resources of a specific type.
1257 */ 1274 */
1258static void vmw_resource_evict_type(struct vmw_private *dev_priv, 1275static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1259 enum vmw_res_type type) 1276 enum vmw_res_type type)
1260{ 1277{
1261 struct list_head *lru_list = &dev_priv->res_lru[type]; 1278 struct list_head *lru_list = &dev_priv->res_lru[type];
1262 struct vmw_resource *evict_res; 1279 struct vmw_resource *evict_res;
1280 unsigned err_count = 0;
1281 int ret;
1263 1282
1264 do { 1283 do {
1265 write_lock(&dev_priv->resource_lock); 1284 write_lock(&dev_priv->resource_lock);
@@ -1272,7 +1291,18 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1272 lru_head)); 1291 lru_head));
1273 list_del_init(&evict_res->lru_head); 1292 list_del_init(&evict_res->lru_head);
1274 write_unlock(&dev_priv->resource_lock); 1293 write_unlock(&dev_priv->resource_lock);
1275 vmw_resource_do_evict(evict_res); 1294
1295 ret = vmw_resource_do_evict(evict_res, false);
1296 if (unlikely(ret != 0)) {
1297 write_lock(&dev_priv->resource_lock);
1298 list_add_tail(&evict_res->lru_head, lru_list);
1299 write_unlock(&dev_priv->resource_lock);
1300 if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
1301 vmw_resource_unreference(&evict_res);
1302 return;
1303 }
1304 }
1305
1276 vmw_resource_unreference(&evict_res); 1306 vmw_resource_unreference(&evict_res);
1277 } while (1); 1307 } while (1);
1278 1308
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index ccfd42b23606..7d6bed222542 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -19,6 +19,4 @@ config TEGRA_HOST1X_FIREWALL
19 19
20 If unsure, choose Y. 20 If unsure, choose Y.
21 21
22source "drivers/gpu/host1x/drm/Kconfig"
23
24endif 22endif
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index 3b037b6e0298..afa1e9e4e512 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -1,6 +1,5 @@
1ccflags-y = -Idrivers/gpu/host1x
2
3host1x-y = \ 1host1x-y = \
2 bus.o \
4 syncpt.o \ 3 syncpt.o \
5 dev.o \ 4 dev.o \
6 intr.o \ 5 intr.o \
@@ -8,13 +7,7 @@ host1x-y = \
8 channel.o \ 7 channel.o \
9 job.o \ 8 job.o \
10 debug.o \ 9 debug.o \
11 hw/host1x01.o 10 hw/host1x01.o \
12 11 hw/host1x02.o
13ccflags-y += -Iinclude/drm
14ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
15 12
16host1x-$(CONFIG_DRM_TEGRA) += drm/drm.o drm/fb.o drm/dc.o
17host1x-$(CONFIG_DRM_TEGRA) += drm/output.o drm/rgb.o drm/hdmi.o
18host1x-$(CONFIG_DRM_TEGRA) += drm/gem.o
19host1x-$(CONFIG_DRM_TEGRA) += drm/gr2d.o
20obj-$(CONFIG_TEGRA_HOST1X) += host1x.o 13obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
new file mode 100644
index 000000000000..509383f8be03
--- /dev/null
+++ b/drivers/gpu/host1x/bus.c
@@ -0,0 +1,550 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012-2013, NVIDIA Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/host1x.h>
19#include <linux/of.h>
20#include <linux/slab.h>
21
22#include "dev.h"
23
24static DEFINE_MUTEX(clients_lock);
25static LIST_HEAD(clients);
26
27static DEFINE_MUTEX(drivers_lock);
28static LIST_HEAD(drivers);
29
30static DEFINE_MUTEX(devices_lock);
31static LIST_HEAD(devices);
32
33struct host1x_subdev {
34 struct host1x_client *client;
35 struct device_node *np;
36 struct list_head list;
37};
38
39/**
40 * host1x_subdev_add() - add a new subdevice with an associated device node
41 */
42static int host1x_subdev_add(struct host1x_device *device,
43 struct device_node *np)
44{
45 struct host1x_subdev *subdev;
46
47 subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
48 if (!subdev)
49 return -ENOMEM;
50
51 INIT_LIST_HEAD(&subdev->list);
52 subdev->np = of_node_get(np);
53
54 mutex_lock(&device->subdevs_lock);
55 list_add_tail(&subdev->list, &device->subdevs);
56 mutex_unlock(&device->subdevs_lock);
57
58 return 0;
59}
60
61/**
62 * host1x_subdev_del() - remove subdevice
63 */
64static void host1x_subdev_del(struct host1x_subdev *subdev)
65{
66 list_del(&subdev->list);
67 of_node_put(subdev->np);
68 kfree(subdev);
69}
70
71/**
72 * host1x_device_parse_dt() - scan device tree and add matching subdevices
73 */
74static int host1x_device_parse_dt(struct host1x_device *device)
75{
76 struct device_node *np;
77 int err;
78
79 for_each_child_of_node(device->dev.parent->of_node, np) {
80 if (of_match_node(device->driver->subdevs, np) &&
81 of_device_is_available(np)) {
82 err = host1x_subdev_add(device, np);
83 if (err < 0)
84 return err;
85 }
86 }
87
88 return 0;
89}
90
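host1x_device_parse_dt() above collects a subdevice for every available child node that matches the driver's subdevs table. A hedged sketch of such a table for a hypothetical composite driver (the driver name is illustrative; the compatible strings are real Tegra ones):

    /* Hedged sketch: the subdevs table consumed by
     * host1x_device_parse_dt() and of_match_node() above. */
    static const struct of_device_id example_subdevs[] = {
            { .compatible = "nvidia,tegra20-dc" },
            { .compatible = "nvidia,tegra20-hdmi" },
            { /* sentinel */ },
    };

    static struct host1x_driver example_driver = {  /* hypothetical */
            .name = "example",
            .subdevs = example_subdevs,
            /* .probe and .remove callbacks omitted from this sketch */
    };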
91static void host1x_subdev_register(struct host1x_device *device,
92 struct host1x_subdev *subdev,
93 struct host1x_client *client)
94{
95 int err;
96
97 /*
98 * Move the subdevice to the list of active (registered) subdevices
99 * and associate it with a client. At the same time, associate the
100 * client with its parent device.
101 */
102 mutex_lock(&device->subdevs_lock);
103 mutex_lock(&device->clients_lock);
104 list_move_tail(&client->list, &device->clients);
105 list_move_tail(&subdev->list, &device->active);
106 client->parent = &device->dev;
107 subdev->client = client;
108 mutex_unlock(&device->clients_lock);
109 mutex_unlock(&device->subdevs_lock);
110
111 /*
112 * When all subdevices have been registered, the composite device is
113 * ready to be probed.
114 */
115 if (list_empty(&device->subdevs)) {
116 err = device->driver->probe(device);
117 if (err < 0)
118 dev_err(&device->dev, "probe failed: %d\n", err);
119 }
120}
121
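The function above implements a simple rendezvous: each client that registers moves its subdevice from the pending subdevs list to the active list, and the composite driver's probe fires exactly once, when the last pending subdevice checks in. In miniature:

    /* Hedged sketch of the rendezvous in host1x_subdev_register(). */
    list_move_tail(&subdev->list, &device->active);

    /* Probe the composite device only when no subdevice is still
     * pending. */
    if (list_empty(&device->subdevs))
            device->driver->probe(device);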
122static void __host1x_subdev_unregister(struct host1x_device *device,
123 struct host1x_subdev *subdev)
124{
125 struct host1x_client *client = subdev->client;
126 int err;
127
128 /*
129 * If all subdevices have been activated, we're about to remove the
130 * first active subdevice, so unload the driver first.
131 */
132 if (list_empty(&device->subdevs)) {
133 err = device->driver->remove(device);
134 if (err < 0)
135 dev_err(&device->dev, "remove failed: %d\n", err);
136 }
137
138 /*
139 * Move the subdevice back to the list of idle subdevices and remove
 140 * it from the list of clients.
141 */
142 mutex_lock(&device->clients_lock);
143 subdev->client = NULL;
144 client->parent = NULL;
145 list_move_tail(&subdev->list, &device->subdevs);
146 /*
147 * XXX: Perhaps don't do this here, but rather explicitly remove it
148 * when the device is about to be deleted.
149 *
150 * This is somewhat complicated by the fact that this function is
151 * used to remove the subdevice when a client is unregistered but
152 * also when the composite device is about to be removed.
153 */
154 list_del_init(&client->list);
155 mutex_unlock(&device->clients_lock);
156}
157
158static void host1x_subdev_unregister(struct host1x_device *device,
159 struct host1x_subdev *subdev)
160{
161 mutex_lock(&device->subdevs_lock);
162 __host1x_subdev_unregister(device, subdev);
163 mutex_unlock(&device->subdevs_lock);
164}
165
166int host1x_device_init(struct host1x_device *device)
167{
168 struct host1x_client *client;
169 int err;
170
171 mutex_lock(&device->clients_lock);
172
173 list_for_each_entry(client, &device->clients, list) {
174 if (client->ops && client->ops->init) {
175 err = client->ops->init(client);
176 if (err < 0) {
177 dev_err(&device->dev,
178 "failed to initialize %s: %d\n",
179 dev_name(client->dev), err);
180 mutex_unlock(&device->clients_lock);
181 return err;
182 }
183 }
184 }
185
186 mutex_unlock(&device->clients_lock);
187
188 return 0;
189}
190
191int host1x_device_exit(struct host1x_device *device)
192{
193 struct host1x_client *client;
194 int err;
195
196 mutex_lock(&device->clients_lock);
197
198 list_for_each_entry_reverse(client, &device->clients, list) {
199 if (client->ops && client->ops->exit) {
200 err = client->ops->exit(client);
201 if (err < 0) {
202 dev_err(&device->dev,
203 "failed to cleanup %s: %d\n",
204 dev_name(client->dev), err);
205 mutex_unlock(&device->clients_lock);
206 return err;
207 }
208 }
209 }
210
211 mutex_unlock(&device->clients_lock);
212
213 return 0;
214}
215
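Note the symmetry between host1x_device_init() and host1x_device_exit(): clients are initialized front to back and torn down back to front with list_for_each_entry_reverse(), so each client can assume that everything initialized before it is still alive during its exit. The idiom in isolation (init() and exit() are hypothetical stand-ins):

    /* Hedged sketch: forward init, reverse teardown. */
    list_for_each_entry(client, &device->clients, list)
            init(client);

    list_for_each_entry_reverse(client, &device->clients, list)
            exit(client);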
216static int host1x_register_client(struct host1x *host1x,
217 struct host1x_client *client)
218{
219 struct host1x_device *device;
220 struct host1x_subdev *subdev;
221
222 mutex_lock(&host1x->devices_lock);
223
224 list_for_each_entry(device, &host1x->devices, list) {
225 list_for_each_entry(subdev, &device->subdevs, list) {
226 if (subdev->np == client->dev->of_node) {
227 host1x_subdev_register(device, subdev, client);
228 mutex_unlock(&host1x->devices_lock);
229 return 0;
230 }
231 }
232 }
233
234 mutex_unlock(&host1x->devices_lock);
235 return -ENODEV;
236}
237
238static int host1x_unregister_client(struct host1x *host1x,
239 struct host1x_client *client)
240{
241 struct host1x_device *device, *dt;
242 struct host1x_subdev *subdev;
243
244 mutex_lock(&host1x->devices_lock);
245
246 list_for_each_entry_safe(device, dt, &host1x->devices, list) {
247 list_for_each_entry(subdev, &device->active, list) {
248 if (subdev->client == client) {
249 host1x_subdev_unregister(device, subdev);
250 mutex_unlock(&host1x->devices_lock);
251 return 0;
252 }
253 }
254 }
255
256 mutex_unlock(&host1x->devices_lock);
257 return -ENODEV;
258}
259
260struct bus_type host1x_bus_type = {
261 .name = "host1x",
262};
263
264int host1x_bus_init(void)
265{
266 return bus_register(&host1x_bus_type);
267}
268
269void host1x_bus_exit(void)
270{
271 bus_unregister(&host1x_bus_type);
272}
273
274static void host1x_device_release(struct device *dev)
275{
276 struct host1x_device *device = to_host1x_device(dev);
277
278 kfree(device);
279}
280
281static int host1x_device_add(struct host1x *host1x,
282 struct host1x_driver *driver)
283{
284 struct host1x_client *client, *tmp;
285 struct host1x_subdev *subdev;
286 struct host1x_device *device;
287 int err;
288
289 device = kzalloc(sizeof(*device), GFP_KERNEL);
290 if (!device)
291 return -ENOMEM;
292
293 mutex_init(&device->subdevs_lock);
294 INIT_LIST_HEAD(&device->subdevs);
295 INIT_LIST_HEAD(&device->active);
296 mutex_init(&device->clients_lock);
297 INIT_LIST_HEAD(&device->clients);
298 INIT_LIST_HEAD(&device->list);
299 device->driver = driver;
300
301 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
302 device->dev.dma_mask = &device->dev.coherent_dma_mask;
303 device->dev.release = host1x_device_release;
 304 dev_set_name(&device->dev, "%s", driver->name);
305 device->dev.bus = &host1x_bus_type;
306 device->dev.parent = host1x->dev;
307
308 err = device_register(&device->dev);
309 if (err < 0)
310 return err;
311
312 err = host1x_device_parse_dt(device);
313 if (err < 0) {
314 device_unregister(&device->dev);
315 return err;
316 }
317
318 mutex_lock(&host1x->devices_lock);
319 list_add_tail(&device->list, &host1x->devices);
320 mutex_unlock(&host1x->devices_lock);
321
322 mutex_lock(&clients_lock);
323
324 list_for_each_entry_safe(client, tmp, &clients, list) {
325 list_for_each_entry(subdev, &device->subdevs, list) {
326 if (subdev->np == client->dev->of_node) {
327 host1x_subdev_register(device, subdev, client);
328 break;
329 }
330 }
331 }
332
333 mutex_unlock(&clients_lock);
334
335 return 0;
336}
337
338/*
339 * Removes a device by first unregistering any subdevices and then removing
340 * itself from the list of devices.
341 *
342 * This function must be called with the host1x->devices_lock held.
343 */
344static void host1x_device_del(struct host1x *host1x,
345 struct host1x_device *device)
346{
347 struct host1x_subdev *subdev, *sd;
348 struct host1x_client *client, *cl;
349
350 mutex_lock(&device->subdevs_lock);
351
352 /* unregister subdevices */
353 list_for_each_entry_safe(subdev, sd, &device->active, list) {
354 /*
355 * host1x_subdev_unregister() will remove the client from
356 * any lists, so we'll need to manually add it back to the
357 * list of idle clients.
358 *
359 * XXX: Alternatively, perhaps don't remove the client from
360 * any lists in host1x_subdev_unregister() and instead do
361 * that explicitly from host1x_unregister_client()?
362 */
363 client = subdev->client;
364
365 __host1x_subdev_unregister(device, subdev);
366
367 /* add the client to the list of idle clients */
368 mutex_lock(&clients_lock);
369 list_add_tail(&client->list, &clients);
370 mutex_unlock(&clients_lock);
371 }
372
373 /* remove subdevices */
374 list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
375 host1x_subdev_del(subdev);
376
377 mutex_unlock(&device->subdevs_lock);
378
379 /* move clients to idle list */
380 mutex_lock(&clients_lock);
381 mutex_lock(&device->clients_lock);
382
383 list_for_each_entry_safe(client, cl, &device->clients, list)
384 list_move_tail(&client->list, &clients);
385
386 mutex_unlock(&device->clients_lock);
387 mutex_unlock(&clients_lock);
388
389 /* finally remove the device */
390 list_del_init(&device->list);
391 device_unregister(&device->dev);
392}
393
394static void host1x_attach_driver(struct host1x *host1x,
395 struct host1x_driver *driver)
396{
397 struct host1x_device *device;
398 int err;
399
400 mutex_lock(&host1x->devices_lock);
401
402 list_for_each_entry(device, &host1x->devices, list) {
403 if (device->driver == driver) {
404 mutex_unlock(&host1x->devices_lock);
405 return;
406 }
407 }
408
409 mutex_unlock(&host1x->devices_lock);
410
411 err = host1x_device_add(host1x, driver);
412 if (err < 0)
413 dev_err(host1x->dev, "failed to allocate device: %d\n", err);
414}
415
416static void host1x_detach_driver(struct host1x *host1x,
417 struct host1x_driver *driver)
418{
419 struct host1x_device *device, *tmp;
420
421 mutex_lock(&host1x->devices_lock);
422
423 list_for_each_entry_safe(device, tmp, &host1x->devices, list)
424 if (device->driver == driver)
425 host1x_device_del(host1x, device);
426
427 mutex_unlock(&host1x->devices_lock);
428}
429
430int host1x_register(struct host1x *host1x)
431{
432 struct host1x_driver *driver;
433
434 mutex_lock(&devices_lock);
435 list_add_tail(&host1x->list, &devices);
436 mutex_unlock(&devices_lock);
437
438 mutex_lock(&drivers_lock);
439
440 list_for_each_entry(driver, &drivers, list)
441 host1x_attach_driver(host1x, driver);
442
443 mutex_unlock(&drivers_lock);
444
445 return 0;
446}
447
448int host1x_unregister(struct host1x *host1x)
449{
450 struct host1x_driver *driver;
451
452 mutex_lock(&drivers_lock);
453
454 list_for_each_entry(driver, &drivers, list)
455 host1x_detach_driver(host1x, driver);
456
457 mutex_unlock(&drivers_lock);
458
459 mutex_lock(&devices_lock);
460 list_del_init(&host1x->list);
461 mutex_unlock(&devices_lock);
462
463 return 0;
464}
465
466int host1x_driver_register(struct host1x_driver *driver)
467{
468 struct host1x *host1x;
469
470 INIT_LIST_HEAD(&driver->list);
471
472 mutex_lock(&drivers_lock);
473 list_add_tail(&driver->list, &drivers);
474 mutex_unlock(&drivers_lock);
475
476 mutex_lock(&devices_lock);
477
478 list_for_each_entry(host1x, &devices, list)
479 host1x_attach_driver(host1x, driver);
480
481 mutex_unlock(&devices_lock);
482
483 return 0;
484}
485EXPORT_SYMBOL(host1x_driver_register);
486
487void host1x_driver_unregister(struct host1x_driver *driver)
488{
489 mutex_lock(&drivers_lock);
490 list_del_init(&driver->list);
491 mutex_unlock(&drivers_lock);
492}
493EXPORT_SYMBOL(host1x_driver_unregister);
494
495int host1x_client_register(struct host1x_client *client)
496{
497 struct host1x *host1x;
498 int err;
499
500 mutex_lock(&devices_lock);
501
502 list_for_each_entry(host1x, &devices, list) {
503 err = host1x_register_client(host1x, client);
504 if (!err) {
505 mutex_unlock(&devices_lock);
506 return 0;
507 }
508 }
509
510 mutex_unlock(&devices_lock);
511
512 mutex_lock(&clients_lock);
513 list_add_tail(&client->list, &clients);
514 mutex_unlock(&clients_lock);
515
516 return 0;
517}
518EXPORT_SYMBOL(host1x_client_register);
519
520int host1x_client_unregister(struct host1x_client *client)
521{
522 struct host1x_client *c;
523 struct host1x *host1x;
524 int err;
525
526 mutex_lock(&devices_lock);
527
528 list_for_each_entry(host1x, &devices, list) {
529 err = host1x_unregister_client(host1x, client);
530 if (!err) {
531 mutex_unlock(&devices_lock);
532 return 0;
533 }
534 }
535
536 mutex_unlock(&devices_lock);
537 mutex_lock(&clients_lock);
538
539 list_for_each_entry(c, &clients, list) {
540 if (c == client) {
541 list_del_init(&c->list);
542 break;
543 }
544 }
545
546 mutex_unlock(&clients_lock);
547
548 return 0;
549}
550EXPORT_SYMBOL(host1x_client_unregister);
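Taken together, bus.c exposes two registration paths that can complete in either order: host1x_driver_register() for the composite driver and host1x_client_register() for the individual engines; whichever side arrives last triggers the subdevice rendezvous. A hedged sketch of the client side, as a platform driver's probe might use it (example_client_ops is hypothetical; the fields follow the struct host1x_client usage visible above):

    /* Hedged sketch: hooking a platform device up as a host1x client. */
    static int example_probe(struct platform_device *pdev)
    {
            struct host1x_client *client;

            client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
            if (!client)
                    return -ENOMEM;

            INIT_LIST_HEAD(&client->list);
            client->dev = &pdev->dev;
            client->ops = &example_client_ops;      /* .init/.exit */

            return host1x_client_register(client);
    }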
diff --git a/drivers/gpu/host1x/host1x_client.h b/drivers/gpu/host1x/bus.h
index 9b85f10f4a44..4099e99212c8 100644
--- a/drivers/gpu/host1x/host1x_client.h
+++ b/drivers/gpu/host1x/bus.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2013, NVIDIA Corporation. 2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012-2013, NVIDIA Corporation
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
@@ -14,22 +15,15 @@
14 * along with this program. If not, see <http://www.gnu.org/licenses/>. 15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */ 16 */
16 17
17#ifndef HOST1X_CLIENT_H 18#ifndef HOST1X_BUS_H
18#define HOST1X_CLIENT_H 19#define HOST1X_BUS_H
19 20
20struct device; 21struct host1x;
21struct platform_device;
22 22
23#ifdef CONFIG_DRM_TEGRA 23int host1x_bus_init(void);
24int host1x_drm_alloc(struct platform_device *pdev); 24void host1x_bus_exit(void);
25#else
26static inline int host1x_drm_alloc(struct platform_device *pdev)
27{
28 return 0;
29}
30#endif
31 25
32void host1x_set_drm_data(struct device *dev, void *data); 26int host1x_register(struct host1x *host1x);
33void *host1x_get_drm_data(struct device *dev); 27int host1x_unregister(struct host1x *host1x);
34 28
35#endif 29#endif
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index de72172d3b5f..3995255b16c7 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -20,6 +20,7 @@
20#include <asm/cacheflush.h> 20#include <asm/cacheflush.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <linux/host1x.h>
23#include <linux/interrupt.h> 24#include <linux/interrupt.h>
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/kfifo.h> 26#include <linux/kfifo.h>
@@ -30,7 +31,6 @@
30#include "channel.h" 31#include "channel.h"
31#include "dev.h" 32#include "dev.h"
32#include "debug.h" 33#include "debug.h"
33#include "host1x_bo.h"
34#include "job.h" 34#include "job.h"
35 35
36/* 36/*
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
index 48723b8eea42..df767cf90d51 100644
--- a/drivers/gpu/host1x/channel.h
+++ b/drivers/gpu/host1x/channel.h
@@ -40,12 +40,6 @@ struct host1x_channel {
40/* channel list operations */ 40/* channel list operations */
41int host1x_channel_list_init(struct host1x *host); 41int host1x_channel_list_init(struct host1x *host);
42 42
43struct host1x_channel *host1x_channel_request(struct device *dev);
44void host1x_channel_free(struct host1x_channel *channel);
45struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
46void host1x_channel_put(struct host1x_channel *channel);
47int host1x_job_submit(struct host1x_job *job);
48
49#define host1x_for_each_channel(host, channel) \ 43#define host1x_for_each_channel(host, channel) \
50 list_for_each_entry(channel, &host->chlist.list, list) 44 list_for_each_entry(channel, &host->chlist.list, list)
51 45
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 471630299878..80da003d63de 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -27,24 +27,13 @@
27#define CREATE_TRACE_POINTS 27#define CREATE_TRACE_POINTS
28#include <trace/events/host1x.h> 28#include <trace/events/host1x.h>
29 29
30#include "bus.h"
30#include "dev.h" 31#include "dev.h"
31#include "intr.h" 32#include "intr.h"
32#include "channel.h" 33#include "channel.h"
33#include "debug.h" 34#include "debug.h"
34#include "hw/host1x01.h" 35#include "hw/host1x01.h"
35#include "host1x_client.h" 36#include "hw/host1x02.h"
36
37void host1x_set_drm_data(struct device *dev, void *data)
38{
39 struct host1x *host1x = dev_get_drvdata(dev);
40 host1x->drm_data = data;
41}
42
43void *host1x_get_drm_data(struct device *dev)
44{
45 struct host1x *host1x = dev_get_drvdata(dev);
46 return host1x ? host1x->drm_data : NULL;
47}
48 37
49void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r) 38void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
50{ 39{
@@ -79,7 +68,17 @@ static const struct host1x_info host1x01_info = {
79 .sync_offset = 0x3000, 68 .sync_offset = 0x3000,
80}; 69};
81 70
71static const struct host1x_info host1x02_info = {
72 .nb_channels = 9,
73 .nb_pts = 32,
74 .nb_mlocks = 16,
75 .nb_bases = 12,
76 .init = host1x02_init,
77 .sync_offset = 0x3000,
78};
79
82static struct of_device_id host1x_of_match[] = { 80static struct of_device_id host1x_of_match[] = {
81 { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
83 { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, }, 82 { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
84 { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, }, 83 { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
85 { }, 84 { },
@@ -114,6 +113,9 @@ static int host1x_probe(struct platform_device *pdev)
114 if (!host) 113 if (!host)
115 return -ENOMEM; 114 return -ENOMEM;
116 115
116 mutex_init(&host->devices_lock);
117 INIT_LIST_HEAD(&host->devices);
118 INIT_LIST_HEAD(&host->list);
117 host->dev = &pdev->dev; 119 host->dev = &pdev->dev;
118 host->info = id->data; 120 host->info = id->data;
119 121
@@ -152,7 +154,7 @@ static int host1x_probe(struct platform_device *pdev)
152 err = host1x_syncpt_init(host); 154 err = host1x_syncpt_init(host);
153 if (err) { 155 if (err) {
154 dev_err(&pdev->dev, "failed to initialize syncpts\n"); 156 dev_err(&pdev->dev, "failed to initialize syncpts\n");
155 return err; 157 goto fail_unprepare_disable;
156 } 158 }
157 159
158 err = host1x_intr_init(host, syncpt_irq); 160 err = host1x_intr_init(host, syncpt_irq);
@@ -163,19 +165,26 @@ static int host1x_probe(struct platform_device *pdev)
163 165
164 host1x_debug_init(host); 166 host1x_debug_init(host);
165 167
166 host1x_drm_alloc(pdev); 168 err = host1x_register(host);
169 if (err < 0)
170 goto fail_deinit_intr;
167 171
168 return 0; 172 return 0;
169 173
174fail_deinit_intr:
175 host1x_intr_deinit(host);
170fail_deinit_syncpt: 176fail_deinit_syncpt:
171 host1x_syncpt_deinit(host); 177 host1x_syncpt_deinit(host);
178fail_unprepare_disable:
179 clk_disable_unprepare(host->clk);
172 return err; 180 return err;
173} 181}
174 182
175static int __exit host1x_remove(struct platform_device *pdev) 183static int host1x_remove(struct platform_device *pdev)
176{ 184{
177 struct host1x *host = platform_get_drvdata(pdev); 185 struct host1x *host = platform_get_drvdata(pdev);
178 186
187 host1x_unregister(host);
179 host1x_intr_deinit(host); 188 host1x_intr_deinit(host);
180 host1x_syncpt_deinit(host); 189 host1x_syncpt_deinit(host);
181 clk_disable_unprepare(host->clk); 190 clk_disable_unprepare(host->clk);
@@ -184,59 +193,36 @@ static int __exit host1x_remove(struct platform_device *pdev)
184} 193}
185 194
186static struct platform_driver tegra_host1x_driver = { 195static struct platform_driver tegra_host1x_driver = {
187 .probe = host1x_probe,
188 .remove = __exit_p(host1x_remove),
189 .driver = { 196 .driver = {
190 .owner = THIS_MODULE,
191 .name = "tegra-host1x", 197 .name = "tegra-host1x",
192 .of_match_table = host1x_of_match, 198 .of_match_table = host1x_of_match,
193 }, 199 },
200 .probe = host1x_probe,
201 .remove = host1x_remove,
194}; 202};
195 203
196static int __init tegra_host1x_init(void) 204static int __init tegra_host1x_init(void)
197{ 205{
198 int err; 206 int err;
199 207
200 err = platform_driver_register(&tegra_host1x_driver); 208 err = host1x_bus_init();
201 if (err < 0) 209 if (err < 0)
202 return err; 210 return err;
203 211
204#ifdef CONFIG_DRM_TEGRA 212 err = platform_driver_register(&tegra_host1x_driver);
205 err = platform_driver_register(&tegra_dc_driver); 213 if (err < 0) {
206 if (err < 0) 214 host1x_bus_exit();
207 goto unregister_host1x; 215 return err;
208 216 }
209 err = platform_driver_register(&tegra_hdmi_driver);
210 if (err < 0)
211 goto unregister_dc;
212
213 err = platform_driver_register(&tegra_gr2d_driver);
214 if (err < 0)
215 goto unregister_hdmi;
216#endif
217 217
218 return 0; 218 return 0;
219
220#ifdef CONFIG_DRM_TEGRA
221unregister_hdmi:
222 platform_driver_unregister(&tegra_hdmi_driver);
223unregister_dc:
224 platform_driver_unregister(&tegra_dc_driver);
225unregister_host1x:
226 platform_driver_unregister(&tegra_host1x_driver);
227 return err;
228#endif
229} 219}
230module_init(tegra_host1x_init); 220module_init(tegra_host1x_init);
231 221
232static void __exit tegra_host1x_exit(void) 222static void __exit tegra_host1x_exit(void)
233{ 223{
234#ifdef CONFIG_DRM_TEGRA
235 platform_driver_unregister(&tegra_gr2d_driver);
236 platform_driver_unregister(&tegra_hdmi_driver);
237 platform_driver_unregister(&tegra_dc_driver);
238#endif
239 platform_driver_unregister(&tegra_host1x_driver); 224 platform_driver_unregister(&tegra_host1x_driver);
225 host1x_bus_exit();
240} 226}
241module_exit(tegra_host1x_exit); 227module_exit(tegra_host1x_exit);
242 228
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index bed90a8131be..a61a976e7a42 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -27,6 +27,7 @@
27#include "job.h" 27#include "job.h"
28 28
29struct host1x_syncpt; 29struct host1x_syncpt;
30struct host1x_syncpt_base;
30struct host1x_channel; 31struct host1x_channel;
31struct host1x_cdma; 32struct host1x_cdma;
32struct host1x_job; 33struct host1x_job;
@@ -102,6 +103,7 @@ struct host1x {
102 103
103 void __iomem *regs; 104 void __iomem *regs;
104 struct host1x_syncpt *syncpt; 105 struct host1x_syncpt *syncpt;
106 struct host1x_syncpt_base *bases;
105 struct device *dev; 107 struct device *dev;
106 struct clk *clk; 108 struct clk *clk;
107 109
@@ -125,7 +127,10 @@ struct host1x {
125 127
126 struct dentry *debugfs; 128 struct dentry *debugfs;
127 129
128 void *drm_data; 130 struct mutex devices_lock;
131 struct list_head devices;
132
133 struct list_head list;
129}; 134};
130 135
131void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v); 136void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
@@ -301,8 +306,4 @@ static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
301 host->debug_op->show_mlocks(host, o); 306 host->debug_op->show_mlocks(host, o);
302} 307}
303 308
304extern struct platform_driver tegra_dc_driver;
305extern struct platform_driver tegra_hdmi_driver;
306extern struct platform_driver tegra_gr2d_driver;
307
308#endif 309#endif
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
deleted file mode 100644
index 8c61ceeaa12d..000000000000
--- a/drivers/gpu/host1x/drm/drm.c
+++ /dev/null
@@ -1,647 +0,0 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/of_address.h>
12#include <linux/of_platform.h>
13
14#include <linux/dma-mapping.h>
15#include <asm/dma-iommu.h>
16
17#include <drm/drm.h>
18#include <drm/drmP.h>
19
20#include "host1x_client.h"
21#include "dev.h"
22#include "drm.h"
23#include "gem.h"
24#include "syncpt.h"
25
26#define DRIVER_NAME "tegra"
27#define DRIVER_DESC "NVIDIA Tegra graphics"
28#define DRIVER_DATE "20120330"
29#define DRIVER_MAJOR 0
30#define DRIVER_MINOR 0
31#define DRIVER_PATCHLEVEL 0
32
33struct host1x_drm_client {
34 struct host1x_client *client;
35 struct device_node *np;
36 struct list_head list;
37};
38
39static int host1x_add_drm_client(struct host1x_drm *host1x,
40 struct device_node *np)
41{
42 struct host1x_drm_client *client;
43
44 client = kzalloc(sizeof(*client), GFP_KERNEL);
45 if (!client)
46 return -ENOMEM;
47
48 INIT_LIST_HEAD(&client->list);
49 client->np = of_node_get(np);
50
51 list_add_tail(&client->list, &host1x->drm_clients);
52
53 return 0;
54}
55
56static int host1x_activate_drm_client(struct host1x_drm *host1x,
57 struct host1x_drm_client *drm,
58 struct host1x_client *client)
59{
60 mutex_lock(&host1x->drm_clients_lock);
61 list_del_init(&drm->list);
62 list_add_tail(&drm->list, &host1x->drm_active);
63 drm->client = client;
64 mutex_unlock(&host1x->drm_clients_lock);
65
66 return 0;
67}
68
69static int host1x_remove_drm_client(struct host1x_drm *host1x,
70 struct host1x_drm_client *client)
71{
72 mutex_lock(&host1x->drm_clients_lock);
73 list_del_init(&client->list);
74 mutex_unlock(&host1x->drm_clients_lock);
75
76 of_node_put(client->np);
77 kfree(client);
78
79 return 0;
80}
81
82static int host1x_parse_dt(struct host1x_drm *host1x)
83{
84 static const char * const compat[] = {
85 "nvidia,tegra20-dc",
86 "nvidia,tegra20-hdmi",
87 "nvidia,tegra20-gr2d",
88 "nvidia,tegra30-dc",
89 "nvidia,tegra30-hdmi",
90 "nvidia,tegra30-gr2d",
91 };
92 unsigned int i;
93 int err;
94
95 for (i = 0; i < ARRAY_SIZE(compat); i++) {
96 struct device_node *np;
97
98 for_each_child_of_node(host1x->dev->of_node, np) {
99 if (of_device_is_compatible(np, compat[i]) &&
100 of_device_is_available(np)) {
101 err = host1x_add_drm_client(host1x, np);
102 if (err < 0)
103 return err;
104 }
105 }
106 }
107
108 return 0;
109}
110
111int host1x_drm_alloc(struct platform_device *pdev)
112{
113 struct host1x_drm *host1x;
114 int err;
115
116 host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
117 if (!host1x)
118 return -ENOMEM;
119
120 mutex_init(&host1x->drm_clients_lock);
121 INIT_LIST_HEAD(&host1x->drm_clients);
122 INIT_LIST_HEAD(&host1x->drm_active);
123 mutex_init(&host1x->clients_lock);
124 INIT_LIST_HEAD(&host1x->clients);
125 host1x->dev = &pdev->dev;
126
127 err = host1x_parse_dt(host1x);
128 if (err < 0) {
129 dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
130 return err;
131 }
132
133 host1x_set_drm_data(&pdev->dev, host1x);
134
135 return 0;
136}
137
138int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm)
139{
140 struct host1x_client *client;
141
142 mutex_lock(&host1x->clients_lock);
143
144 list_for_each_entry(client, &host1x->clients, list) {
145 if (client->ops && client->ops->drm_init) {
146 int err = client->ops->drm_init(client, drm);
147 if (err < 0) {
148 dev_err(host1x->dev,
149 "DRM setup failed for %s: %d\n",
150 dev_name(client->dev), err);
151 mutex_unlock(&host1x->clients_lock);
152 return err;
153 }
154 }
155 }
156
157 mutex_unlock(&host1x->clients_lock);
158
159 return 0;
160}
161
162int host1x_drm_exit(struct host1x_drm *host1x)
163{
164 struct platform_device *pdev = to_platform_device(host1x->dev);
165 struct host1x_client *client;
166
167 if (!host1x->drm)
168 return 0;
169
170 mutex_lock(&host1x->clients_lock);
171
172 list_for_each_entry_reverse(client, &host1x->clients, list) {
173 if (client->ops && client->ops->drm_exit) {
174 int err = client->ops->drm_exit(client);
175 if (err < 0) {
176 dev_err(host1x->dev,
177 "DRM cleanup failed for %s: %d\n",
178 dev_name(client->dev), err);
179 mutex_unlock(&host1x->clients_lock);
180 return err;
181 }
182 }
183 }
184
185 mutex_unlock(&host1x->clients_lock);
186
187 drm_platform_exit(&tegra_drm_driver, pdev);
188 host1x->drm = NULL;
189
190 return 0;
191}
192
193int host1x_register_client(struct host1x_drm *host1x,
194 struct host1x_client *client)
195{
196 struct host1x_drm_client *drm, *tmp;
197 int err;
198
199 mutex_lock(&host1x->clients_lock);
200 list_add_tail(&client->list, &host1x->clients);
201 mutex_unlock(&host1x->clients_lock);
202
203 list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
204 if (drm->np == client->dev->of_node)
205 host1x_activate_drm_client(host1x, drm, client);
206
207 if (list_empty(&host1x->drm_clients)) {
208 struct platform_device *pdev = to_platform_device(host1x->dev);
209
210 err = drm_platform_init(&tegra_drm_driver, pdev);
211 if (err < 0) {
212 dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
213 return err;
214 }
215 }
216
217 return 0;
218}
219
220int host1x_unregister_client(struct host1x_drm *host1x,
221 struct host1x_client *client)
222{
223 struct host1x_drm_client *drm, *tmp;
224 int err;
225
226 list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
227 if (drm->client == client) {
228 err = host1x_drm_exit(host1x);
229 if (err < 0) {
230 dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
231 err);
232 return err;
233 }
234
235 host1x_remove_drm_client(host1x, drm);
236 break;
237 }
238 }
239
240 mutex_lock(&host1x->clients_lock);
241 list_del_init(&client->list);
242 mutex_unlock(&host1x->clients_lock);
243
244 return 0;
245}
246
247static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
248{
249 struct host1x_drm *host1x;
250 int err;
251
252 host1x = host1x_get_drm_data(drm->dev);
253 drm->dev_private = host1x;
254 host1x->drm = drm;
255
256 drm_mode_config_init(drm);
257
258 err = host1x_drm_init(host1x, drm);
259 if (err < 0)
260 return err;
261
262 /*
263 * We don't use the drm_irq_install() helpers provided by the DRM
264 * core, so we need to set this manually in order to allow the
265 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
266 */
267 drm->irq_enabled = 1;
268
269 err = drm_vblank_init(drm, drm->mode_config.num_crtc);
270 if (err < 0)
271 return err;
272
273 err = tegra_drm_fb_init(drm);
274 if (err < 0)
275 return err;
276
277 drm_kms_helper_poll_init(drm);
278
279 return 0;
280}
281
282static int tegra_drm_unload(struct drm_device *drm)
283{
284 drm_kms_helper_poll_fini(drm);
285 tegra_drm_fb_exit(drm);
286
287 drm_mode_config_cleanup(drm);
288
289 return 0;
290}
291
292static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
293{
294 struct host1x_drm_file *fpriv;
295
296 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
297 if (!fpriv)
298 return -ENOMEM;
299
300 INIT_LIST_HEAD(&fpriv->contexts);
301 filp->driver_priv = fpriv;
302
303 return 0;
304}
305
306static void host1x_drm_context_free(struct host1x_drm_context *context)
307{
308 context->client->ops->close_channel(context);
309 kfree(context);
310}
311
312static void tegra_drm_lastclose(struct drm_device *drm)
313{
314 struct host1x_drm *host1x = drm->dev_private;
315
316 tegra_fbdev_restore_mode(host1x->fbdev);
317}
318
319#ifdef CONFIG_DRM_TEGRA_STAGING
320static bool host1x_drm_file_owns_context(struct host1x_drm_file *file,
321 struct host1x_drm_context *context)
322{
323 struct host1x_drm_context *ctx;
324
325 list_for_each_entry(ctx, &file->contexts, list)
326 if (ctx == context)
327 return true;
328
329 return false;
330}
331
332static int tegra_gem_create(struct drm_device *drm, void *data,
333 struct drm_file *file)
334{
335 struct drm_tegra_gem_create *args = data;
336 struct tegra_bo *bo;
337
338 bo = tegra_bo_create_with_handle(file, drm, args->size,
339 &args->handle);
340 if (IS_ERR(bo))
341 return PTR_ERR(bo);
342
343 return 0;
344}
345
346static int tegra_gem_mmap(struct drm_device *drm, void *data,
347 struct drm_file *file)
348{
349 struct drm_tegra_gem_mmap *args = data;
350 struct drm_gem_object *gem;
351 struct tegra_bo *bo;
352
353 gem = drm_gem_object_lookup(drm, file, args->handle);
354 if (!gem)
355 return -EINVAL;
356
357 bo = to_tegra_bo(gem);
358
359 args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
360
361 drm_gem_object_unreference(gem);
362
363 return 0;
364}
365
366static int tegra_syncpt_read(struct drm_device *drm, void *data,
367 struct drm_file *file)
368{
369 struct drm_tegra_syncpt_read *args = data;
370 struct host1x *host = dev_get_drvdata(drm->dev);
371 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
372
373 if (!sp)
374 return -EINVAL;
375
376 args->value = host1x_syncpt_read_min(sp);
377 return 0;
378}
379
380static int tegra_syncpt_incr(struct drm_device *drm, void *data,
381 struct drm_file *file)
382{
383 struct drm_tegra_syncpt_incr *args = data;
384 struct host1x *host = dev_get_drvdata(drm->dev);
385 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
386
387 if (!sp)
388 return -EINVAL;
389
390 return host1x_syncpt_incr(sp);
391}
392
393static int tegra_syncpt_wait(struct drm_device *drm, void *data,
394 struct drm_file *file)
395{
396 struct drm_tegra_syncpt_wait *args = data;
397 struct host1x *host = dev_get_drvdata(drm->dev);
398 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
399
400 if (!sp)
401 return -EINVAL;
402
403 return host1x_syncpt_wait(sp, args->thresh, args->timeout,
404 &args->value);
405}
406
407static int tegra_open_channel(struct drm_device *drm, void *data,
408 struct drm_file *file)
409{
410 struct drm_tegra_open_channel *args = data;
411 struct host1x_client *client;
412 struct host1x_drm_context *context;
413 struct host1x_drm_file *fpriv = file->driver_priv;
414 struct host1x_drm *host1x = drm->dev_private;
415 int err = -ENODEV;
416
417 context = kzalloc(sizeof(*context), GFP_KERNEL);
418 if (!context)
419 return -ENOMEM;
420
421 list_for_each_entry(client, &host1x->clients, list)
422 if (client->class == args->client) {
423 err = client->ops->open_channel(client, context);
424 if (err)
425 break;
426
427 context->client = client;
428 list_add(&context->list, &fpriv->contexts);
429 args->context = (uintptr_t)context;
430 return 0;
431 }
432
433 kfree(context);
434 return err;
435}
436
437static int tegra_close_channel(struct drm_device *drm, void *data,
438 struct drm_file *file)
439{
440 struct drm_tegra_close_channel *args = data;
441 struct host1x_drm_file *fpriv = file->driver_priv;
442 struct host1x_drm_context *context =
443 (struct host1x_drm_context *)(uintptr_t)args->context;
444
445 if (!host1x_drm_file_owns_context(fpriv, context))
446 return -EINVAL;
447
448 list_del(&context->list);
449 host1x_drm_context_free(context);
450
451 return 0;
452}
453
454static int tegra_get_syncpt(struct drm_device *drm, void *data,
455 struct drm_file *file)
456{
457 struct drm_tegra_get_syncpt *args = data;
458 struct host1x_drm_file *fpriv = file->driver_priv;
459 struct host1x_drm_context *context =
460 (struct host1x_drm_context *)(uintptr_t)args->context;
461 struct host1x_syncpt *syncpt;
462
463 if (!host1x_drm_file_owns_context(fpriv, context))
464 return -ENODEV;
465
466 if (args->index >= context->client->num_syncpts)
467 return -EINVAL;
468
469 syncpt = context->client->syncpts[args->index];
470 args->id = host1x_syncpt_id(syncpt);
471
472 return 0;
473}
474
475static int tegra_submit(struct drm_device *drm, void *data,
476 struct drm_file *file)
477{
478 struct drm_tegra_submit *args = data;
479 struct host1x_drm_file *fpriv = file->driver_priv;
480 struct host1x_drm_context *context =
481 (struct host1x_drm_context *)(uintptr_t)args->context;
482
483 if (!host1x_drm_file_owns_context(fpriv, context))
484 return -ENODEV;
485
486 return context->client->ops->submit(context, args, drm, file);
487}
488#endif
489
490static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
491#ifdef CONFIG_DRM_TEGRA_STAGING
492 DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
493 DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
494 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
495 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
496 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
497 DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
498 DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
499 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
500 DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
501#endif
502};
503
504static const struct file_operations tegra_drm_fops = {
505 .owner = THIS_MODULE,
506 .open = drm_open,
507 .release = drm_release,
508 .unlocked_ioctl = drm_ioctl,
509 .mmap = tegra_drm_mmap,
510 .poll = drm_poll,
511 .read = drm_read,
512#ifdef CONFIG_COMPAT
513 .compat_ioctl = drm_compat_ioctl,
514#endif
515 .llseek = noop_llseek,
516};
517
518static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
519{
520 struct drm_crtc *crtc;
521
522 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
523 struct tegra_dc *dc = to_tegra_dc(crtc);
524
525 if (dc->pipe == pipe)
526 return crtc;
527 }
528
529 return NULL;
530}
531
532static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
533{
534 /* TODO: implement real hardware counter using syncpoints */
535 return drm_vblank_count(dev, crtc);
536}
537
538static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
539{
540 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
541 struct tegra_dc *dc = to_tegra_dc(crtc);
542
543 if (!crtc)
544 return -ENODEV;
545
546 tegra_dc_enable_vblank(dc);
547
548 return 0;
549}
550
551static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
552{
553 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
554 struct tegra_dc *dc = to_tegra_dc(crtc);
555
556 if (crtc)
557 tegra_dc_disable_vblank(dc);
558}
559
560static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
561{
562 struct host1x_drm_file *fpriv = file->driver_priv;
563 struct host1x_drm_context *context, *tmp;
564 struct drm_crtc *crtc;
565
566 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
567 tegra_dc_cancel_page_flip(crtc, file);
568
569 list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
570 host1x_drm_context_free(context);
571
572 kfree(fpriv);
573}
574
575#ifdef CONFIG_DEBUG_FS
576static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
577{
578 struct drm_info_node *node = (struct drm_info_node *)s->private;
579 struct drm_device *drm = node->minor->dev;
580 struct drm_framebuffer *fb;
581
582 mutex_lock(&drm->mode_config.fb_lock);
583
584 list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
585 seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
586 fb->base.id, fb->width, fb->height, fb->depth,
587 fb->bits_per_pixel,
588 atomic_read(&fb->refcount.refcount));
589 }
590
591 mutex_unlock(&drm->mode_config.fb_lock);
592
593 return 0;
594}
595
596static struct drm_info_list tegra_debugfs_list[] = {
597 { "framebuffers", tegra_debugfs_framebuffers, 0 },
598};
599
600static int tegra_debugfs_init(struct drm_minor *minor)
601{
602 return drm_debugfs_create_files(tegra_debugfs_list,
603 ARRAY_SIZE(tegra_debugfs_list),
604 minor->debugfs_root, minor);
605}
606
607static void tegra_debugfs_cleanup(struct drm_minor *minor)
608{
609 drm_debugfs_remove_files(tegra_debugfs_list,
610 ARRAY_SIZE(tegra_debugfs_list), minor);
611}
612#endif
613
614struct drm_driver tegra_drm_driver = {
615 .driver_features = DRIVER_MODESET | DRIVER_GEM,
616 .load = tegra_drm_load,
617 .unload = tegra_drm_unload,
618 .open = tegra_drm_open,
619 .preclose = tegra_drm_preclose,
620 .lastclose = tegra_drm_lastclose,
621
622 .get_vblank_counter = tegra_drm_get_vblank_counter,
623 .enable_vblank = tegra_drm_enable_vblank,
624 .disable_vblank = tegra_drm_disable_vblank,
625
626#if defined(CONFIG_DEBUG_FS)
627 .debugfs_init = tegra_debugfs_init,
628 .debugfs_cleanup = tegra_debugfs_cleanup,
629#endif
630
631 .gem_free_object = tegra_bo_free_object,
632 .gem_vm_ops = &tegra_bo_vm_ops,
633 .dumb_create = tegra_bo_dumb_create,
634 .dumb_map_offset = tegra_bo_dumb_map_offset,
635 .dumb_destroy = drm_gem_dumb_destroy,
636
637 .ioctls = tegra_drm_ioctls,
638 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
639 .fops = &tegra_drm_fops,
640
641 .name = DRIVER_NAME,
642 .desc = DRIVER_DESC,
643 .date = DRIVER_DATE,
644 .major = DRIVER_MAJOR,
645 .minor = DRIVER_MINOR,
646 .patchlevel = DRIVER_PATCHLEVEL,
647};
diff --git a/drivers/gpu/host1x/drm/gr2d.c b/drivers/gpu/host1x/drm/gr2d.c
deleted file mode 100644
index 27ffcf15a4b4..000000000000
--- a/drivers/gpu/host1x/drm/gr2d.c
+++ /dev/null
@@ -1,343 +0,0 @@
1/*
2 * drivers/video/tegra/host/gr2d/gr2d.c
3 *
4 * Tegra Graphics 2D
5 *
6 * Copyright (c) 2012-2013, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/export.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/clk.h>
25
26#include "channel.h"
27#include "drm.h"
28#include "gem.h"
29#include "job.h"
30#include "host1x.h"
31#include "host1x_bo.h"
32#include "host1x_client.h"
33#include "syncpt.h"
34
35struct gr2d {
36 struct host1x_client client;
37 struct clk *clk;
38 struct host1x_channel *channel;
39 unsigned long *addr_regs;
40};
41
42static inline struct gr2d *to_gr2d(struct host1x_client *client)
43{
44 return container_of(client, struct gr2d, client);
45}
46
47static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg);
48
49static int gr2d_client_init(struct host1x_client *client,
50 struct drm_device *drm)
51{
52 return 0;
53}
54
55static int gr2d_client_exit(struct host1x_client *client)
56{
57 return 0;
58}
59
60static int gr2d_open_channel(struct host1x_client *client,
61 struct host1x_drm_context *context)
62{
63 struct gr2d *gr2d = to_gr2d(client);
64
65 context->channel = host1x_channel_get(gr2d->channel);
66
67 if (!context->channel)
68 return -ENOMEM;
69
70 return 0;
71}
72
73static void gr2d_close_channel(struct host1x_drm_context *context)
74{
75 host1x_channel_put(context->channel);
76}
77
78static struct host1x_bo *host1x_bo_lookup(struct drm_device *drm,
79 struct drm_file *file,
80 u32 handle)
81{
82 struct drm_gem_object *gem;
83 struct tegra_bo *bo;
84
85 gem = drm_gem_object_lookup(drm, file, handle);
86 if (!gem)
87 return NULL;
88
89 mutex_lock(&drm->struct_mutex);
90 drm_gem_object_unreference(gem);
91 mutex_unlock(&drm->struct_mutex);
92
93 bo = to_tegra_bo(gem);
94 return &bo->base;
95}
96
97static int gr2d_submit(struct host1x_drm_context *context,
98 struct drm_tegra_submit *args, struct drm_device *drm,
99 struct drm_file *file)
100{
101 struct host1x_job *job;
102 unsigned int num_cmdbufs = args->num_cmdbufs;
103 unsigned int num_relocs = args->num_relocs;
104 unsigned int num_waitchks = args->num_waitchks;
105 struct drm_tegra_cmdbuf __user *cmdbufs =
106 (void * __user)(uintptr_t)args->cmdbufs;
107 struct drm_tegra_reloc __user *relocs =
108 (void * __user)(uintptr_t)args->relocs;
109 struct drm_tegra_waitchk __user *waitchks =
110 (void * __user)(uintptr_t)args->waitchks;
111 struct drm_tegra_syncpt syncpt;
112 int err;
113
114	/* We don't yet support more than one syncpt_incr struct per submit */
115 if (args->num_syncpts != 1)
116 return -EINVAL;
117
118 job = host1x_job_alloc(context->channel, args->num_cmdbufs,
119 args->num_relocs, args->num_waitchks);
120 if (!job)
121 return -ENOMEM;
122
123 job->num_relocs = args->num_relocs;
124 job->num_waitchk = args->num_waitchks;
125 job->client = (u32)args->context;
126 job->class = context->client->class;
127 job->serialize = true;
128
129 while (num_cmdbufs) {
130 struct drm_tegra_cmdbuf cmdbuf;
131 struct host1x_bo *bo;
132
133 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
134 if (err)
135 goto fail;
136
137 bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
138 if (!bo) {
139 err = -ENOENT;
140 goto fail;
141 }
142
143 host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
144 num_cmdbufs--;
145 cmdbufs++;
146 }
147
148 err = copy_from_user(job->relocarray, relocs,
149 sizeof(*relocs) * num_relocs);
150 if (err)
151 goto fail;
152
153 while (num_relocs--) {
154 struct host1x_reloc *reloc = &job->relocarray[num_relocs];
155 struct host1x_bo *cmdbuf, *target;
156
157 cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
158 target = host1x_bo_lookup(drm, file, (u32)reloc->target);
159
160 reloc->cmdbuf = cmdbuf;
161 reloc->target = target;
162
163 if (!reloc->target || !reloc->cmdbuf) {
164 err = -ENOENT;
165 goto fail;
166 }
167 }
168
169 err = copy_from_user(job->waitchk, waitchks,
170 sizeof(*waitchks) * num_waitchks);
171 if (err)
172 goto fail;
173
174 err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
175 sizeof(syncpt));
176 if (err)
177 goto fail;
178
179 job->syncpt_id = syncpt.id;
180 job->syncpt_incrs = syncpt.incrs;
181 job->timeout = 10000;
182 job->is_addr_reg = gr2d_is_addr_reg;
183
184 if (args->timeout && args->timeout < 10000)
185 job->timeout = args->timeout;
186
187 err = host1x_job_pin(job, context->client->dev);
188 if (err)
189 goto fail;
190
191 err = host1x_job_submit(job);
192 if (err)
193 goto fail_submit;
194
195 args->fence = job->syncpt_end;
196
197 host1x_job_put(job);
198 return 0;
199
200fail_submit:
201 host1x_job_unpin(job);
202fail:
203 host1x_job_put(job);
204 return err;
205}
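
Stripped of the copy_from_user() plumbing, gr2d_submit() above drives the host1x job API through a fixed lifecycle. A minimal sketch, assuming a single already-validated gather (bo, words and offset are placeholders):

/* Sketch of the job lifecycle behind gr2d_submit(). */
struct host1x_job *job;
int err;

job = host1x_job_alloc(context->channel, 1, 0, 0);
if (!job)
        return -ENOMEM;

host1x_job_add_gather(job, bo, words, offset);

err = host1x_job_pin(job, context->client->dev);  /* resolve buffer addresses */
if (err)
        goto put;

err = host1x_job_submit(job);                     /* push to the channel */
if (err)
        host1x_job_unpin(job);
put:
host1x_job_put(job);                              /* drop the submit reference */
return err;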
206
207static struct host1x_client_ops gr2d_client_ops = {
208 .drm_init = gr2d_client_init,
209 .drm_exit = gr2d_client_exit,
210 .open_channel = gr2d_open_channel,
211 .close_channel = gr2d_close_channel,
212 .submit = gr2d_submit,
213};
214
215static void gr2d_init_addr_reg_map(struct device *dev, struct gr2d *gr2d)
216{
217 const u32 gr2d_addr_regs[] = {0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d, 0x31,
218 0x32, 0x48, 0x49, 0x4a, 0x4b, 0x4c};
219 unsigned long *bitmap;
220 int i;
221
222 bitmap = devm_kzalloc(dev, DIV_ROUND_UP(256, BITS_PER_BYTE),
223 GFP_KERNEL);
224
225 for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); ++i) {
226 u32 reg = gr2d_addr_regs[i];
227 bitmap[BIT_WORD(reg)] |= BIT_MASK(reg);
228 }
229
230 gr2d->addr_regs = bitmap;
231}
232
233static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg)
234{
235 struct gr2d *gr2d = dev_get_drvdata(dev);
236
237 switch (class) {
238 case HOST1X_CLASS_HOST1X:
239 return reg == 0x2b;
240 case HOST1X_CLASS_GR2D:
241 case HOST1X_CLASS_GR2D_SB:
242 reg &= 0xff;
243 if (gr2d->addr_regs[BIT_WORD(reg)] & BIT_MASK(reg))
244 return 1;
245 default:
246 return 0;
247 }
248}
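
The bitmap test above is ordinary open-coded bit addressing: BIT_WORD() picks the long containing the bit and BIT_MASK() the bit within it. Worked example for register 0x48 (72 decimal) on a 32-bit build:

/* BIT_WORD(0x48) = 72 / 32 = 2 and BIT_MASK(0x48) = 1UL << (72 % 32) = 0x100, */
/* so the lookup reduces to: */
bool is_addr = gr2d->addr_regs[2] & 0x100;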
249
250static const struct of_device_id gr2d_match[] = {
251 { .compatible = "nvidia,tegra30-gr2d" },
252 { .compatible = "nvidia,tegra20-gr2d" },
253 { },
254};
255
256static int gr2d_probe(struct platform_device *pdev)
257{
258 struct device *dev = &pdev->dev;
259 struct host1x_drm *host1x = host1x_get_drm_data(dev->parent);
260 int err;
261 struct gr2d *gr2d = NULL;
262 struct host1x_syncpt **syncpts;
263
264 gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
265 if (!gr2d)
266 return -ENOMEM;
267
268 syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
269 if (!syncpts)
270 return -ENOMEM;
271
272 gr2d->clk = devm_clk_get(dev, NULL);
273 if (IS_ERR(gr2d->clk)) {
274 dev_err(dev, "cannot get clock\n");
275 return PTR_ERR(gr2d->clk);
276 }
277
278 err = clk_prepare_enable(gr2d->clk);
279 if (err) {
280 dev_err(dev, "cannot turn on clock\n");
281 return err;
282 }
283
284 gr2d->channel = host1x_channel_request(dev);
285 if (!gr2d->channel)
286 return -ENOMEM;
287
288 *syncpts = host1x_syncpt_request(dev, false);
289 if (!(*syncpts)) {
290 host1x_channel_free(gr2d->channel);
291 return -ENOMEM;
292 }
293
294 gr2d->client.ops = &gr2d_client_ops;
295 gr2d->client.dev = dev;
296 gr2d->client.class = HOST1X_CLASS_GR2D;
297 gr2d->client.syncpts = syncpts;
298 gr2d->client.num_syncpts = 1;
299
300 err = host1x_register_client(host1x, &gr2d->client);
301 if (err < 0) {
302 dev_err(dev, "failed to register host1x client: %d\n", err);
303 return err;
304 }
305
306 gr2d_init_addr_reg_map(dev, gr2d);
307
308 platform_set_drvdata(pdev, gr2d);
309
310 return 0;
311}
312
313static int __exit gr2d_remove(struct platform_device *pdev)
314{
315 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
316 struct gr2d *gr2d = platform_get_drvdata(pdev);
317 unsigned int i;
318 int err;
319
320 err = host1x_unregister_client(host1x, &gr2d->client);
321 if (err < 0) {
322 dev_err(&pdev->dev, "failed to unregister client: %d\n", err);
323 return err;
324 }
325
326 for (i = 0; i < gr2d->client.num_syncpts; i++)
327 host1x_syncpt_free(gr2d->client.syncpts[i]);
328
329 host1x_channel_free(gr2d->channel);
330 clk_disable_unprepare(gr2d->clk);
331
332 return 0;
333}
334
335struct platform_driver tegra_gr2d_driver = {
336 .probe = gr2d_probe,
337 .remove = __exit_p(gr2d_remove),
338 .driver = {
339 .owner = THIS_MODULE,
340 .name = "gr2d",
341 .of_match_table = gr2d_match,
342 }
343};
diff --git a/drivers/gpu/host1x/host1x.h b/drivers/gpu/host1x/host1x.h
deleted file mode 100644
index a2bc1e65e972..000000000000
--- a/drivers/gpu/host1x/host1x.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * Tegra host1x driver
3 *
4 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#ifndef __LINUX_HOST1X_H
22#define __LINUX_HOST1X_H
23
24enum host1x_class {
25 HOST1X_CLASS_HOST1X = 0x1,
26 HOST1X_CLASS_GR2D = 0x51,
27 HOST1X_CLASS_GR2D_SB = 0x52
28};
29
30#endif
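
These class IDs are what SETCLASS opcodes carry to steer a channel's command stream at an engine. As a sketch (the encoding noted in the comment is the one from the host1x01 hardware headers; treat it as illustrative):

/* Select the 2D engine, starting at register offset 0, empty write mask. */
u32 word = host1x_opcode_setclass(HOST1X_CLASS_GR2D, 0, 0);
/* i.e. (0 << 28) | (0 << 16) | (0x51 << 6) | 0 per host1x01_hardware.h */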
diff --git a/drivers/gpu/host1x/host1x_bo.h b/drivers/gpu/host1x/host1x_bo.h
deleted file mode 100644
index 4c1f10bd773d..000000000000
--- a/drivers/gpu/host1x/host1x_bo.h
+++ /dev/null
@@ -1,87 +0,0 @@
1/*
2 * Tegra host1x Memory Management Abstraction header
3 *
4 * Copyright (c) 2012-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef _HOST1X_BO_H
20#define _HOST1X_BO_H
21
22struct host1x_bo;
23
24struct host1x_bo_ops {
25 struct host1x_bo *(*get)(struct host1x_bo *bo);
26 void (*put)(struct host1x_bo *bo);
27 dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
28 void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
29 void *(*mmap)(struct host1x_bo *bo);
30 void (*munmap)(struct host1x_bo *bo, void *addr);
31 void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
32 void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
33};
34
35struct host1x_bo {
36 const struct host1x_bo_ops *ops;
37};
38
39static inline void host1x_bo_init(struct host1x_bo *bo,
40 const struct host1x_bo_ops *ops)
41{
42 bo->ops = ops;
43}
44
45static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
46{
47 return bo->ops->get(bo);
48}
49
50static inline void host1x_bo_put(struct host1x_bo *bo)
51{
52 bo->ops->put(bo);
53}
54
55static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
56 struct sg_table **sgt)
57{
58 return bo->ops->pin(bo, sgt);
59}
60
61static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
62{
63 bo->ops->unpin(bo, sgt);
64}
65
66static inline void *host1x_bo_mmap(struct host1x_bo *bo)
67{
68 return bo->ops->mmap(bo);
69}
70
71static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
72{
73 bo->ops->munmap(bo, addr);
74}
75
76static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
77{
78 return bo->ops->kmap(bo, pagenum);
79}
80
81static inline void host1x_bo_kunmap(struct host1x_bo *bo,
82 unsigned int pagenum, void *addr)
83{
84 bo->ops->kunmap(bo, pagenum, addr);
85}
86
87#endif
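
Every one of these wrappers is a plain indirect call, so a backend only has to embed struct host1x_bo and fill in an ops table. A hypothetical, minimal contiguous-memory backend (all names invented for illustration):

struct example_bo {
        struct host1x_bo base;
        void *vaddr;            /* kernel mapping */
        dma_addr_t paddr;       /* contiguous bus address */
};

static inline struct example_bo *to_example_bo(struct host1x_bo *bo)
{
        return container_of(bo, struct example_bo, base);
}

static dma_addr_t example_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
        *sgt = NULL;                    /* contiguous: no scatterlist needed */
        return to_example_bo(bo)->paddr;
}

static void *example_bo_mmap(struct host1x_bo *bo)
{
        return to_example_bo(bo)->vaddr;
}

The remaining ops follow the same container_of() pattern; a real backend collects them in a const struct host1x_bo_ops and wires each object up with host1x_bo_init().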
diff --git a/drivers/gpu/host1x/hw/Makefile b/drivers/gpu/host1x/hw/Makefile
deleted file mode 100644
index 9b50863a2236..000000000000
--- a/drivers/gpu/host1x/hw/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
1ccflags-y = -Idrivers/gpu/host1x
2
3host1x-hw-objs = \
4 host1x01.o
5
6obj-$(CONFIG_TEGRA_HOST1X) += host1x-hw.o
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 2ee4ad55c4db..37e2a63241a9 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -20,10 +20,10 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 
-#include "cdma.h"
-#include "channel.h"
-#include "dev.h"
-#include "debug.h"
+#include "../cdma.h"
+#include "../channel.h"
+#include "../dev.h"
+#include "../debug.h"
 
 /*
  * Put the restart at the end of pushbuffer memory
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index ee199623e365..4608257ab656 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -16,15 +16,15 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/host1x.h>
 #include <linux/slab.h>
+
 #include <trace/events/host1x.h>
 
-#include "host1x.h"
-#include "host1x_bo.h"
-#include "channel.h"
-#include "dev.h"
-#include "intr.h"
-#include "job.h"
+#include "../channel.h"
+#include "../dev.h"
+#include "../intr.h"
+#include "../job.h"
 
 #define HOST1X_CHANNEL_SIZE 16384
 #define TRACE_MAX_LENGTH 128U
@@ -67,6 +67,22 @@ static void submit_gathers(struct host1x_job *job)
 	}
 }
 
+static inline void synchronize_syncpt_base(struct host1x_job *job)
+{
+	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
+	struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
+	u32 id, value;
+
+	value = host1x_syncpt_read_max(sp);
+	id = sp->base->id;
+
+	host1x_cdma_push(&job->channel->cdma,
+			 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
+				HOST1X_UCLASS_LOAD_SYNCPT_BASE, 1),
+			 HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(id) |
+			 HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(value));
+}
+
 static int channel_submit(struct host1x_job *job)
 {
 	struct host1x_channel *ch = job->channel;
@@ -118,6 +134,10 @@ static int channel_submit(struct host1x_job *job)
 				 host1x_syncpt_read_max(sp)));
 	}
 
+	/* Synchronize base register to allow using it for relative waiting */
+	if (sp->base)
+		synchronize_syncpt_base(job);
+
 	syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
 
 	job->syncpt_end = syncval;
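
With the base primed to the current max value, a gather can then wait relative to it instead of on an absolute threshold. A sketch of the data word such a wait would carry, built from the WAIT_SYNCPT_BASE field helpers in the uclass headers (the helper function itself is hypothetical):

static u32 encode_relative_wait(unsigned int syncpt_id, unsigned int base_id,
                                u16 offset)
{
        return HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(syncpt_id) |
               HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(base_id) |
               HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(offset);
}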
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 334c038052f5..640c75ca5a8b 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -15,18 +15,10 @@
  *
  */
 
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-
-#include <linux/io.h>
-
-#include "dev.h"
-#include "debug.h"
-#include "cdma.h"
-#include "channel.h"
-#include "host1x_bo.h"
+#include "../dev.h"
+#include "../debug.h"
+#include "../cdma.h"
+#include "../channel.h"
 
 #define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400
 
diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c
index a14e91cd1e58..859b73beb4d0 100644
--- a/drivers/gpu/host1x/hw/host1x01.c
+++ b/drivers/gpu/host1x/hw/host1x01.c
@@ -17,17 +17,17 @@
  */
 
 /* include hw specification */
-#include "hw/host1x01.h"
-#include "hw/host1x01_hardware.h"
+#include "host1x01.h"
+#include "host1x01_hardware.h"
 
 /* include code */
-#include "hw/cdma_hw.c"
-#include "hw/channel_hw.c"
-#include "hw/debug_hw.c"
-#include "hw/intr_hw.c"
-#include "hw/syncpt_hw.c"
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
 
-#include "dev.h"
+#include "../dev.h"
 
 int host1x01_init(struct host1x *host)
 {
diff --git a/drivers/gpu/host1x/hw/host1x02.c b/drivers/gpu/host1x/hw/host1x02.c
new file mode 100644
index 000000000000..e98caca0ca42
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x02.c
@@ -0,0 +1,42 @@
1/*
2 * Host1x init for Tegra114 SoCs
3 *
4 * Copyright (c) 2013 NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19/* include hw specification */
20#include "host1x01.h"
21#include "host1x01_hardware.h"
22
23/* include code */
24#include "cdma_hw.c"
25#include "channel_hw.c"
26#include "debug_hw.c"
27#include "intr_hw.c"
28#include "syncpt_hw.c"
29
30#include "../dev.h"
31
32int host1x02_init(struct host1x *host)
33{
34 host->channel_op = &host1x_channel_ops;
35 host->cdma_op = &host1x_cdma_ops;
36 host->cdma_pb_op = &host1x_pushbuffer_ops;
37 host->syncpt_op = &host1x_syncpt_ops;
38 host->intr_op = &host1x_intr_ops;
39 host->debug_op = &host1x_debug_ops;
40
41 return 0;
42}
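
Like host1x01.c above it, this file textually includes the generation-agnostic .c implementations after the generation's register headers, so each host1xNN object is compiled against its own register offsets. The idiom in miniature (file names hypothetical):

/* gen_defs.h */
#define REG_SYNCPT_BASE 0x400

/* impl.c -- generation-agnostic code, included rather than linked */
static u32 syncpt_addr(unsigned int id)
{
        return REG_SYNCPT_BASE + id * 4;
}

/* gen.c */
#include "gen_defs.h"
#include "impl.c"       /* syncpt_addr() now bakes in this generation's base */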
diff --git a/drivers/gpu/host1x/hw/host1x02.h b/drivers/gpu/host1x/hw/host1x02.h
new file mode 100644
index 000000000000..f7486609a90e
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x02.h
@@ -0,0 +1,26 @@
1/*
2 * Host1x init for Tegra114 SoCs
3 *
4 * Copyright (c) 2013 NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef HOST1X_HOST1X02_H
20#define HOST1X_HOST1X02_H
21
22struct host1x;
23
24int host1x02_init(struct host1x *host);
25
26#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
index 42f3ce19ca32..f7553599ee27 100644
--- a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
@@ -111,6 +111,12 @@ static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
 }
 #define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
 	host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+	return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+	host1x_uclass_load_syncpt_base_r()
 static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
 {
 	return (v & 0xff) << 24;
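
Combining the two field helpers that follow this register gives the data word pushed by synchronize_syncpt_base() in channel_hw.c; for example, base index 2 loaded with value 0x1234:

u32 data = HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(2) |      /* 0x02000000 */
           HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(0x1234);      /* 0x00001234 */
/* data == 0x02001234 */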
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_channel.h b/drivers/gpu/host1x/hw/hw_host1x02_channel.h
new file mode 100644
index 000000000000..e490bcde33fe
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_channel.h
@@ -0,0 +1,121 @@
1/*
2 * Copyright (c) 2013 NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef HOST1X_HW_HOST1X02_CHANNEL_H
52#define HOST1X_HW_HOST1X02_CHANNEL_H
53
54static inline u32 host1x_channel_fifostat_r(void)
55{
56 return 0x0;
57}
58#define HOST1X_CHANNEL_FIFOSTAT \
59 host1x_channel_fifostat_r()
60static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
61{
62 return (r >> 11) & 0x1;
63}
64#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
65 host1x_channel_fifostat_cfempty_v(r)
66static inline u32 host1x_channel_dmastart_r(void)
67{
68 return 0x14;
69}
70#define HOST1X_CHANNEL_DMASTART \
71 host1x_channel_dmastart_r()
72static inline u32 host1x_channel_dmaput_r(void)
73{
74 return 0x18;
75}
76#define HOST1X_CHANNEL_DMAPUT \
77 host1x_channel_dmaput_r()
78static inline u32 host1x_channel_dmaget_r(void)
79{
80 return 0x1c;
81}
82#define HOST1X_CHANNEL_DMAGET \
83 host1x_channel_dmaget_r()
84static inline u32 host1x_channel_dmaend_r(void)
85{
86 return 0x20;
87}
88#define HOST1X_CHANNEL_DMAEND \
89 host1x_channel_dmaend_r()
90static inline u32 host1x_channel_dmactrl_r(void)
91{
92 return 0x24;
93}
94#define HOST1X_CHANNEL_DMACTRL \
95 host1x_channel_dmactrl_r()
96static inline u32 host1x_channel_dmactrl_dmastop(void)
97{
98 return 1 << 0;
99}
100#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
101 host1x_channel_dmactrl_dmastop()
102static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
103{
104 return (r >> 0) & 0x1;
105}
106#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
107 host1x_channel_dmactrl_dmastop_v(r)
108static inline u32 host1x_channel_dmactrl_dmagetrst(void)
109{
110 return 1 << 1;
111}
112#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
113 host1x_channel_dmactrl_dmagetrst()
114static inline u32 host1x_channel_dmactrl_dmainitget(void)
115{
116 return 1 << 2;
117}
118#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
119 host1x_channel_dmactrl_dmainitget()
120
121#endif
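
Typical use pairs these accessors with the channel register I/O helpers from dev.h; a sketch of busy-waiting for the command FIFO to drain (assuming the host1x_ch_readl() helper):

u32 value = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
while (!HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(value))
        value = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);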
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_sync.h b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
new file mode 100644
index 000000000000..4495401525e8
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
@@ -0,0 +1,243 @@
1/*
2 * Copyright (c) 2013 NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef HOST1X_HW_HOST1X02_SYNC_H
52#define HOST1X_HW_HOST1X02_SYNC_H
53
54#define REGISTER_STRIDE 4
55
56static inline u32 host1x_sync_syncpt_r(unsigned int id)
57{
58 return 0x400 + id * REGISTER_STRIDE;
59}
60#define HOST1X_SYNC_SYNCPT(id) \
61 host1x_sync_syncpt_r(id)
62static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
63{
64 return 0x40 + id * REGISTER_STRIDE;
65}
66#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
67 host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
68static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
69{
70 return 0x60 + id * REGISTER_STRIDE;
71}
72#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
73 host1x_sync_syncpt_thresh_int_disable_r(id)
74static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
75{
76 return 0x68 + id * REGISTER_STRIDE;
77}
78#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
79 host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
80static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
81{
82 return 0x80 + channel * REGISTER_STRIDE;
83}
84#define HOST1X_SYNC_CF_SETUP(channel) \
85 host1x_sync_cf_setup_r(channel)
86static inline u32 host1x_sync_cf_setup_base_v(u32 r)
87{
88 return (r >> 0) & 0x3ff;
89}
90#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
91 host1x_sync_cf_setup_base_v(r)
92static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
93{
94 return (r >> 16) & 0x3ff;
95}
96#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
97 host1x_sync_cf_setup_limit_v(r)
98static inline u32 host1x_sync_cmdproc_stop_r(void)
99{
100 return 0xac;
101}
102#define HOST1X_SYNC_CMDPROC_STOP \
103 host1x_sync_cmdproc_stop_r()
104static inline u32 host1x_sync_ch_teardown_r(void)
105{
106 return 0xb0;
107}
108#define HOST1X_SYNC_CH_TEARDOWN \
109 host1x_sync_ch_teardown_r()
110static inline u32 host1x_sync_usec_clk_r(void)
111{
112 return 0x1a4;
113}
114#define HOST1X_SYNC_USEC_CLK \
115 host1x_sync_usec_clk_r()
116static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
117{
118 return 0x1a8;
119}
120#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
121 host1x_sync_ctxsw_timeout_cfg_r()
122static inline u32 host1x_sync_ip_busy_timeout_r(void)
123{
124 return 0x1bc;
125}
126#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
127 host1x_sync_ip_busy_timeout_r()
128static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
129{
130 return 0x340 + id * REGISTER_STRIDE;
131}
132#define HOST1X_SYNC_MLOCK_OWNER(id) \
133 host1x_sync_mlock_owner_r(id)
134static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
135{
136 return (v & 0xf) << 8;
137}
138#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
139 host1x_sync_mlock_owner_chid_f(v)
140static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
141{
142 return (r >> 1) & 0x1;
143}
144#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
145 host1x_sync_mlock_owner_cpu_owns_v(r)
146static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
147{
148 return (r >> 0) & 0x1;
149}
150#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
151 host1x_sync_mlock_owner_ch_owns_v(r)
152static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
153{
154 return 0x500 + id * REGISTER_STRIDE;
155}
156#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
157 host1x_sync_syncpt_int_thresh_r(id)
158static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
159{
160 return 0x600 + id * REGISTER_STRIDE;
161}
162#define HOST1X_SYNC_SYNCPT_BASE(id) \
163 host1x_sync_syncpt_base_r(id)
164static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
165{
166 return 0x700 + id * REGISTER_STRIDE;
167}
168#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
169 host1x_sync_syncpt_cpu_incr_r(id)
170static inline u32 host1x_sync_cbread_r(unsigned int channel)
171{
172 return 0x720 + channel * REGISTER_STRIDE;
173}
174#define HOST1X_SYNC_CBREAD(channel) \
175 host1x_sync_cbread_r(channel)
176static inline u32 host1x_sync_cfpeek_ctrl_r(void)
177{
178 return 0x74c;
179}
180#define HOST1X_SYNC_CFPEEK_CTRL \
181 host1x_sync_cfpeek_ctrl_r()
182static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
183{
184 return (v & 0x3ff) << 0;
185}
186#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
187 host1x_sync_cfpeek_ctrl_addr_f(v)
188static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
189{
190 return (v & 0xf) << 16;
191}
192#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
193 host1x_sync_cfpeek_ctrl_channr_f(v)
194static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
195{
196 return (v & 0x1) << 31;
197}
198#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
199 host1x_sync_cfpeek_ctrl_ena_f(v)
200static inline u32 host1x_sync_cfpeek_read_r(void)
201{
202 return 0x750;
203}
204#define HOST1X_SYNC_CFPEEK_READ \
205 host1x_sync_cfpeek_read_r()
206static inline u32 host1x_sync_cfpeek_ptrs_r(void)
207{
208 return 0x754;
209}
210#define HOST1X_SYNC_CFPEEK_PTRS \
211 host1x_sync_cfpeek_ptrs_r()
212static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
213{
214 return (r >> 0) & 0x3ff;
215}
216#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
217 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
218static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
219{
220 return (r >> 16) & 0x3ff;
221}
222#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
223 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
224static inline u32 host1x_sync_cbstat_r(unsigned int channel)
225{
226 return 0x758 + channel * REGISTER_STRIDE;
227}
228#define HOST1X_SYNC_CBSTAT(channel) \
229 host1x_sync_cbstat_r(channel)
230static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
231{
232 return (r >> 0) & 0xffff;
233}
234#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
235 host1x_sync_cbstat_cboffset_v(r)
236static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
237{
238 return (r >> 16) & 0x3ff;
239}
240#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
241 host1x_sync_cbstat_cbclass_v(r)
242
243#endif
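
All of the indexed _r() helpers here are plain stride arithmetic over REGISTER_STRIDE; for example, the shadow register for sync point 3:

u32 offset = host1x_sync_syncpt_r(3);   /* 0x400 + 3 * 4 == 0x40c */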
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
new file mode 100644
index 000000000000..a3b3c9874413
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
@@ -0,0 +1,175 @@
1/*
2 * Copyright (c) 2013 NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef HOST1X_HW_HOST1X02_UCLASS_H
52#define HOST1X_HW_HOST1X02_UCLASS_H
53
54static inline u32 host1x_uclass_incr_syncpt_r(void)
55{
56 return 0x0;
57}
58#define HOST1X_UCLASS_INCR_SYNCPT \
59 host1x_uclass_incr_syncpt_r()
60static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
61{
62 return (v & 0xff) << 8;
63}
64#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
65 host1x_uclass_incr_syncpt_cond_f(v)
66static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
67{
68 return (v & 0xff) << 0;
69}
70#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
71 host1x_uclass_incr_syncpt_indx_f(v)
72static inline u32 host1x_uclass_wait_syncpt_r(void)
73{
74 return 0x8;
75}
76#define HOST1X_UCLASS_WAIT_SYNCPT \
77 host1x_uclass_wait_syncpt_r()
78static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
79{
80 return (v & 0xff) << 24;
81}
82#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
83 host1x_uclass_wait_syncpt_indx_f(v)
84static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
85{
86 return (v & 0xffffff) << 0;
87}
88#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
89 host1x_uclass_wait_syncpt_thresh_f(v)
90static inline u32 host1x_uclass_wait_syncpt_base_r(void)
91{
92 return 0x9;
93}
94#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
95 host1x_uclass_wait_syncpt_base_r()
96static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
97{
98 return (v & 0xff) << 24;
99}
100#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
101 host1x_uclass_wait_syncpt_base_indx_f(v)
102static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
103{
104 return (v & 0xff) << 16;
105}
106#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
107 host1x_uclass_wait_syncpt_base_base_indx_f(v)
108static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
109{
110 return (v & 0xffff) << 0;
111}
112#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
113 host1x_uclass_wait_syncpt_base_offset_f(v)
114static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
115{
116 return (v & 0xff) << 24;
117}
118#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
119 host1x_uclass_load_syncpt_base_base_indx_f(v)
120static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
121{
122 return (v & 0xffffff) << 0;
123}
124#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
125 host1x_uclass_load_syncpt_base_value_f(v)
126static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
127{
128 return (v & 0xff) << 24;
129}
130#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
131 host1x_uclass_incr_syncpt_base_base_indx_f(v)
132static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
133{
134 return (v & 0xffffff) << 0;
135}
136#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
137 host1x_uclass_incr_syncpt_base_offset_f(v)
138static inline u32 host1x_uclass_indoff_r(void)
139{
140 return 0x2d;
141}
142#define HOST1X_UCLASS_INDOFF \
143 host1x_uclass_indoff_r()
144static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
145{
146 return (v & 0xf) << 28;
147}
148#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
149 host1x_uclass_indoff_indbe_f(v)
150static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
151{
152 return (v & 0x1) << 27;
153}
154#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
155 host1x_uclass_indoff_autoinc_f(v)
156static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
157{
158 return (v & 0xff) << 18;
159}
160#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
161 host1x_uclass_indoff_indmodid_f(v)
162static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
163{
164 return (v & 0xffff) << 2;
165}
166#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
167 host1x_uclass_indoff_indroffset_f(v)
168static inline u32 host1x_uclass_indoff_rwn_read_v(void)
169{
170 return 1;
171}
172#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
173 host1x_uclass_indoff_indroffset_f(v)
174
175#endif
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index b592eef1efcb..b26dcc83bc1b 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -22,8 +22,8 @@
 #include <linux/io.h>
 #include <asm/mach/irq.h>
 
-#include "intr.h"
-#include "dev.h"
+#include "../intr.h"
+#include "../dev.h"
 
 /*
  * Sync point threshold interrupt service function
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index 0cf6095d3367..56e85395ac24 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -18,8 +18,8 @@
 
 #include <linux/io.h>
 
-#include "dev.h"
-#include "syncpt.h"
+#include "../dev.h"
+#include "../syncpt.h"
 
 /*
  * Write the current syncpoint value back to hw.
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index c4e1050f2252..de5ec333ce1a 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -18,6 +18,7 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
+#include <linux/host1x.h>
 #include <linux/kref.h>
 #include <linux/module.h>
 #include <linux/scatterlist.h>
@@ -27,7 +28,6 @@
 
 #include "channel.h"
 #include "dev.h"
-#include "host1x_bo.h"
 #include "job.h"
 #include "syncpt.h"
 
@@ -264,7 +264,7 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
 }
 
 static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
-			unsigned int offset)
+			unsigned int offset)
 {
 	offset *= sizeof(u32);
 
@@ -281,7 +281,7 @@ struct host1x_firewall {
 	unsigned int num_relocs;
 	struct host1x_reloc *reloc;
 
-	struct host1x_bo *cmdbuf_id;
+	struct host1x_bo *cmdbuf;
 	unsigned int offset;
 
 	u32 words;
@@ -291,25 +291,37 @@ struct host1x_firewall {
 	u32 count;
 };
 
+static int check_register(struct host1x_firewall *fw, unsigned long offset)
+{
+	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
+		if (!fw->num_relocs)
+			return -EINVAL;
+
+		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
+			return -EINVAL;
+
+		fw->num_relocs--;
+		fw->reloc++;
+	}
+
+	return 0;
+}
+
 static int check_mask(struct host1x_firewall *fw)
 {
 	u32 mask = fw->mask;
 	u32 reg = fw->reg;
+	int ret;
 
 	while (mask) {
 		if (fw->words == 0)
 			return -EINVAL;
 
 		if (mask & 1) {
-			if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
-				if (!fw->num_relocs)
-					return -EINVAL;
-				if (!check_reloc(fw->reloc, fw->cmdbuf_id,
-						 fw->offset))
-					return -EINVAL;
-				fw->reloc++;
-				fw->num_relocs--;
-			}
+			ret = check_register(fw, reg);
+			if (ret < 0)
+				return ret;
+
 			fw->words--;
 			fw->offset++;
 		}
@@ -324,19 +336,16 @@ static int check_incr(struct host1x_firewall *fw)
 {
 	u32 count = fw->count;
 	u32 reg = fw->reg;
+	int ret;
 
 	while (count) {
 		if (fw->words == 0)
 			return -EINVAL;
 
-		if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
-			if (!fw->num_relocs)
-				return -EINVAL;
-			if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
-				return -EINVAL;
-			fw->reloc++;
-			fw->num_relocs--;
-		}
+		ret = check_register(fw, reg);
+		if (ret < 0)
+			return ret;
+
 		reg++;
 		fw->words--;
 		fw->offset++;
@@ -348,21 +357,17 @@ static int check_incr(struct host1x_firewall *fw)
 
 static int check_nonincr(struct host1x_firewall *fw)
 {
-	int is_addr_reg = fw->job->is_addr_reg(fw->dev, fw->class, fw->reg);
 	u32 count = fw->count;
+	int ret;
 
 	while (count) {
 		if (fw->words == 0)
 			return -EINVAL;
 
-		if (is_addr_reg) {
-			if (!fw->num_relocs)
-				return -EINVAL;
-			if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
-				return -EINVAL;
-			fw->reloc++;
-			fw->num_relocs--;
-		}
+		ret = check_register(fw, fw->reg);
+		if (ret < 0)
+			return ret;
+
 		fw->words--;
 		fw->offset++;
 		count--;
@@ -381,7 +386,7 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
 		return 0;
 
 	fw->words = g->words;
-	fw->cmdbuf_id = g->bo;
+	fw->cmdbuf = g->bo;
 	fw->offset = 0;
 
 	while (fw->words && !err) {
@@ -436,10 +441,6 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
 		}
 	}
 
-	/* No relocs should remain at this point */
-	if (fw->num_relocs)
-		err = -EINVAL;
-
 out:
 	return err;
 }
@@ -493,6 +494,10 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
 		offset += g->words * sizeof(u32);
 	}
 
+	/* No relocs should remain at this point */
+	if (fw.num_relocs)
+		return -EINVAL;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index fba45f20458e..33a697d6dcef 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -34,15 +34,6 @@ struct host1x_cmdbuf {
 	u32 pad;
 };
 
-struct host1x_reloc {
-	struct host1x_bo *cmdbuf;
-	u32 cmdbuf_offset;
-	struct host1x_bo *target;
-	u32 target_offset;
-	u32 shift;
-	u32 pad;
-};
-
 struct host1x_waitchk {
 	struct host1x_bo *bo;
 	u32 offset;
@@ -56,105 +47,6 @@ struct host1x_job_unpin_data {
 };
 
 /*
- * Each submit is tracked as a host1x_job.
- */
-struct host1x_job {
-	/* When refcount goes to zero, job can be freed */
-	struct kref ref;
-
-	/* List entry */
-	struct list_head list;
-
-	/* Channel where job is submitted to */
-	struct host1x_channel *channel;
-
-	u32 client;
-
-	/* Gathers and their memory */
-	struct host1x_job_gather *gathers;
-	unsigned int num_gathers;
-
-	/* Wait checks to be processed at submit time */
-	struct host1x_waitchk *waitchk;
-	unsigned int num_waitchk;
-	u32 waitchk_mask;
-
-	/* Array of handles to be pinned & unpinned */
-	struct host1x_reloc *relocarray;
-	unsigned int num_relocs;
-	struct host1x_job_unpin_data *unpins;
-	unsigned int num_unpins;
-
-	dma_addr_t *addr_phys;
-	dma_addr_t *gather_addr_phys;
-	dma_addr_t *reloc_addr_phys;
-
-	/* Sync point id, number of increments and end related to the submit */
-	u32 syncpt_id;
-	u32 syncpt_incrs;
-	u32 syncpt_end;
-
-	/* Maximum time to wait for this job */
-	unsigned int timeout;
-
-	/* Index and number of slots used in the push buffer */
-	unsigned int first_get;
-	unsigned int num_slots;
-
-	/* Copy of gathers */
-	size_t gather_copy_size;
-	dma_addr_t gather_copy;
-	u8 *gather_copy_mapped;
-
-	/* Check if register is marked as an address reg */
-	int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
-
-	/* Request a SETCLASS to this class */
-	u32 class;
-
-	/* Add a channel wait for previous ops to complete */
-	bool serialize;
-};
-/*
- * Allocate memory for a job. Just enough memory will be allocated to
- * accommodate the submit.
- */
-struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
-				    u32 num_cmdbufs, u32 num_relocs,
-				    u32 num_waitchks);
-
-/*
- * Add a gather to a job.
- */
-void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
-			   u32 words, u32 offset);
-
-/*
- * Increment the reference count on a job.
- */
-struct host1x_job *host1x_job_get(struct host1x_job *job);
-
-/*
- * Decrement the job reference count; the job is freed when it reaches zero.
- */
-void host1x_job_put(struct host1x_job *job);
-
-/*
- * Pin memory related to job. This handles relocation of addresses to the
- * host1x address space. Handles both the gather memory and any other memory
- * referred to from the gather buffers.
- *
- * Also handles patching out host waits that would wait for an expired sync
- * point value.
- */
-int host1x_job_pin(struct host1x_job *job, struct device *dev);
-
-/*
- * Unpin memory related to job.
- */
-void host1x_job_unpin(struct host1x_job *job);
-
-/*
  * Dump contents of job to debug output.
  */
 void host1x_job_dump(struct device *dev, struct host1x_job *job);
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 409745b949db..159c479829c9 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -30,9 +30,32 @@
 #define SYNCPT_CHECK_PERIOD (2 * HZ)
 #define MAX_STUCK_CHECK_COUNT 15
 
-static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
-						  struct device *dev,
-						  bool client_managed)
+static struct host1x_syncpt_base *
+host1x_syncpt_base_request(struct host1x *host)
+{
+	struct host1x_syncpt_base *bases = host->bases;
+	unsigned int i;
+
+	for (i = 0; i < host->info->nb_bases; i++)
+		if (!bases[i].requested)
+			break;
+
+	if (i >= host->info->nb_bases)
+		return NULL;
+
+	bases[i].requested = true;
+	return &bases[i];
+}
+
+static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
+{
+	if (base)
+		base->requested = false;
+}
+
+static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
+						 struct device *dev,
+						 unsigned long flags)
 {
 	int i;
 	struct host1x_syncpt *sp = host->syncpt;
@@ -44,6 +67,12 @@ static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
 	if (i >= host->info->nb_pts)
 		return NULL;
 
+	if (flags & HOST1X_SYNCPT_HAS_BASE) {
+		sp->base = host1x_syncpt_base_request(host);
+		if (!sp->base)
+			return NULL;
+	}
+
 	name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
 			 dev ? dev_name(dev) : NULL);
 	if (!name)
@@ -51,7 +80,11 @@ static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
 
 	sp->dev = dev;
 	sp->name = name;
-	sp->client_managed = client_managed;
+
+	if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
+		sp->client_managed = true;
+	else
+		sp->client_managed = false;
 
 	return sp;
 }
@@ -303,25 +336,35 @@ int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
 
 int host1x_syncpt_init(struct host1x *host)
 {
+	struct host1x_syncpt_base *bases;
 	struct host1x_syncpt *syncpt;
 	int i;
 
 	syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
 			      GFP_KERNEL);
 	if (!syncpt)
 		return -ENOMEM;
 
-	for (i = 0; i < host->info->nb_pts; ++i) {
+	bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases,
+			     GFP_KERNEL);
+	if (!bases)
+		return -ENOMEM;
+
+	for (i = 0; i < host->info->nb_pts; i++) {
 		syncpt[i].id = i;
 		syncpt[i].host = host;
 	}
 
+	for (i = 0; i < host->info->nb_bases; i++)
+		bases[i].id = i;
+
 	host->syncpt = syncpt;
+	host->bases = bases;
 
 	host1x_syncpt_restore(host);
 
 	/* Allocate sync point to use for clearing waits for expired fences */
-	host->nop_sp = _host1x_syncpt_alloc(host, NULL, false);
+	host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
 	if (!host->nop_sp)
 		return -ENOMEM;
 
@@ -329,10 +372,10 @@ int host1x_syncpt_init(struct host1x *host)
 }
 
 struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
-					    bool client_managed)
+					    unsigned long flags)
 {
 	struct host1x *host = dev_get_drvdata(dev->parent);
-	return _host1x_syncpt_alloc(host, dev, client_managed);
+	return host1x_syncpt_alloc(host, dev, flags);
 }
 
 void host1x_syncpt_free(struct host1x_syncpt *sp)
@@ -340,7 +383,9 @@ void host1x_syncpt_free(struct host1x_syncpt *sp)
 	if (!sp)
 		return;
 
+	host1x_syncpt_base_free(sp->base);
 	kfree(sp->name);
+	sp->base = NULL;
 	sp->dev = NULL;
 	sp->name = NULL;
 	sp->client_managed = false;
@@ -354,6 +399,25 @@ void host1x_syncpt_deinit(struct host1x *host)
 		kfree(sp->name);
 }
 
+/*
+ * Read max. It indicates how many operations there are in queue, either in
+ * channel or in a software thread.
+ */
+u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
+{
+	smp_rmb();
+	return (u32)atomic_read(&sp->max_val);
+}
+
+/*
+ * Read min, which is a shadow of the current sync point value in hardware.
+ */
+u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
+{
+	smp_rmb();
+	return (u32)atomic_read(&sp->min_val);
+}
+
 int host1x_syncpt_nb_pts(struct host1x *host)
 {
 	return host->info->nb_pts;
@@ -375,3 +439,13 @@ struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
 		return NULL;
 	return host->syncpt + id;
 }
+
+struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
+{
+	return sp ? sp->base : NULL;
+}
+
+u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
+{
+	return base->id;
+}
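
Taken together, a client now asks for a wait base through the flags argument and recovers the base id for command-stream use. A minimal sketch of the new API surface (HOST1X_SYNCPT_HAS_BASE comes from <linux/host1x.h>; error handling abbreviated):

struct host1x_syncpt *sp;
struct host1x_syncpt_base *base;

sp = host1x_syncpt_request(dev, HOST1X_SYNCPT_HAS_BASE);
if (!sp)
        return -ENOMEM;

base = host1x_syncpt_get_base(sp);
/* host1x_syncpt_base_id(base) is what LOAD/WAIT_SYNCPT_BASE opcodes index. */

host1x_syncpt_free(sp); /* also releases the base */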
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index 267c0b9d3647..9056465ecd3f 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -20,6 +20,7 @@
 #define __HOST1X_SYNCPT_H
 
 #include <linux/atomic.h>
+#include <linux/host1x.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 
@@ -30,6 +31,11 @@ struct host1x;
 /* Reserved for replacing an expired wait with a NOP */
 #define HOST1X_SYNCPT_RESERVED 0
 
+struct host1x_syncpt_base {
+	unsigned int id;
+	bool requested;
+};
+
 struct host1x_syncpt {
 	int id;
 	atomic_t min_val;
@@ -39,6 +45,7 @@ struct host1x_syncpt {
 	bool client_managed;
 	struct host1x *host;
 	struct device *dev;
+	struct host1x_syncpt_base *base;
 
 	/* interrupt data */
 	struct host1x_syncpt_intr intr;
@@ -50,25 +57,6 @@ int host1x_syncpt_init(struct host1x *host);
 /* Free sync point array */
 void host1x_syncpt_deinit(struct host1x *host);
 
-/*
- * Read max. It indicates how many operations there are in queue, either in
- * channel or in a software thread.
- */
-static inline u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
-{
-	smp_rmb();
-	return (u32)atomic_read(&sp->max_val);
-}
-
-/*
- * Read min, which is a shadow of the current sync point value in hardware.
- */
-static inline u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
-{
-	smp_rmb();
-	return (u32)atomic_read(&sp->min_val);
-}
-
 /* Return number of sync points supported. */
 int host1x_syncpt_nb_pts(struct host1x *host);
 
@@ -112,9 +100,6 @@ static inline bool host1x_syncpt_idle(struct host1x_syncpt *sp)
 	return (min == max);
 }
 
-/* Return pointer to struct denoting sync point id. */
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
-
 /* Load current value from hardware to the shadow register. */
 u32 host1x_syncpt_load(struct host1x_syncpt *sp);
 
@@ -130,16 +115,9 @@ void host1x_syncpt_restore(struct host1x *host);
 /* Read current wait base value into shadow register and return it. */
 u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
 
-/* Request incrementing a sync point. */
-int host1x_syncpt_incr(struct host1x_syncpt *sp);
-
 /* Indicate future operations by incrementing the sync point max. */
 u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
 
-/* Wait until sync point reaches a threshold value, or a timeout. */
-int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh,
-		       long timeout, u32 *value);
-
 /* Check if sync point id is valid. */
 static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
 {
@@ -149,14 +127,4 @@ static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
 /* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
 int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
 
-/* Return id of the sync point */
-u32 host1x_syncpt_id(struct host1x_syncpt *sp);
-
-/* Allocate a sync point for a device. */
-struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
-					    bool client_managed);
-
-/* Free a sync point. */
-void host1x_syncpt_free(struct host1x_syncpt *sp);
-
 #endif
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 394254f7d6b5..5032ff7c2259 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -1,6 +1,7 @@
1config DRM_IMX 1config DRM_IMX
2 tristate "DRM Support for Freescale i.MX" 2 tristate "DRM Support for Freescale i.MX"
3 select DRM_KMS_HELPER 3 select DRM_KMS_HELPER
4 select DRM_KMS_FB_HELPER
4 select VIDEOMODE_HELPERS 5 select VIDEOMODE_HELPERS
5 select DRM_GEM_CMA_HELPER 6 select DRM_GEM_CMA_HELPER
6 select DRM_KMS_CMA_HELPER 7 select DRM_KMS_CMA_HELPER
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 3d3a824f6de7..51aa9772f959 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -407,14 +407,14 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
407 407
408 /* 408 /*
409 * enable drm irq mode. 409 * enable drm irq mode.
410 * - with irq_enabled = 1, we can use the vblank feature. 410 * - with irq_enabled = true, we can use the vblank feature.
411 * 411 *
412 * P.S. note that we don't use the drm irq handler, but 412 * P.S. note that we don't use the drm irq handler, but
413 * the driver's own handler instead, because the 413 * the driver's own handler instead, because the
414 * drm framework supports only one irq handler and 414 * drm framework supports only one irq handler and
415 * drivers can manage their interrupts themselves 415 * drivers can manage their interrupts themselves
416 */ 416 */
417 drm->irq_enabled = 1; 417 drm->irq_enabled = true;
418 418
419 drm_mode_config_init(drm); 419 drm_mode_config_init(drm);
420 imx_drm_mode_config_init(drm); 420 imx_drm_mode_config_init(drm);
@@ -434,11 +434,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
434 goto err_init; 434 goto err_init;
435 435
436 /* 436 /*
437 * with vblank_disable_allowed = 1, vblank interrupt will be disabled 437 * with vblank_disable_allowed = true, vblank interrupt will be disabled
438 * by the drm timer once the current process gives up ownership of 438 * by the drm timer once the current process gives up ownership of
439 * the vblank event (after drm_vblank_put() is called) 439 * the vblank event (after drm_vblank_put() is called)
440 */ 440 */
441 imxdrm->drm->vblank_disable_allowed = 1; 441 imxdrm->drm->vblank_disable_allowed = true;
442 442
443 if (!imx_drm_device_get()) 443 if (!imx_drm_device_get())
444 ret = -EINVAL; 444 ret = -EINVAL;
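
The int-to-bool conversion above follows the usual KMS load-time pattern. A hedged sketch of that pattern in a generic driver (the names and the single-CRTC count are assumptions, not imx-drm code):

static int my_drm_load(struct drm_device *drm, unsigned long flags)
{
	int ret;

	/* enable vblank support; the driver handles its own interrupt */
	drm->irq_enabled = true;

	ret = drm_vblank_init(drm, 1);	/* one CRTC, assumed */
	if (ret)
		return ret;

	/* let the DRM timer turn vblank interrupts off when unused */
	drm->vblank_disable_allowed = true;

	return 0;
}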
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 14317b70b413..4f2e1b35eb38 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -19,10 +19,10 @@ source "drivers/char/agp/Kconfig"
19 19
20source "drivers/gpu/vga/Kconfig" 20source "drivers/gpu/vga/Kconfig"
21 21
22source "drivers/gpu/drm/Kconfig"
23
24source "drivers/gpu/host1x/Kconfig" 22source "drivers/gpu/host1x/Kconfig"
25 23
24source "drivers/gpu/drm/Kconfig"
25
26config VGASTATE 26config VGASTATE
27 tristate 27 tristate
28 default n 28 default n
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index b46fb45f2cca..1d4a920ef7ff 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -150,6 +150,7 @@ int drm_err(const char *func, const char *format, ...);
150#define DRIVER_BUS_PCI 0x1 150#define DRIVER_BUS_PCI 0x1
151#define DRIVER_BUS_PLATFORM 0x2 151#define DRIVER_BUS_PLATFORM 0x2
152#define DRIVER_BUS_USB 0x3 152#define DRIVER_BUS_USB 0x3
153#define DRIVER_BUS_HOST1X 0x4
153 154
154/***********************************************************************/ 155/***********************************************************************/
155/** \name Begin the DRM... */ 156/** \name Begin the DRM... */
@@ -412,7 +413,12 @@ struct drm_prime_file_private {
412 413
413/** File private data */ 414/** File private data */
414struct drm_file { 415struct drm_file {
415 int authenticated; 416 unsigned always_authenticated :1;
417 unsigned authenticated :1;
418 unsigned is_master :1; /* this file private is a master for a minor */
419 /* true when the client has asked us to expose stereo 3D mode flags */
420 unsigned stereo_allowed :1;
421
416 struct pid *pid; 422 struct pid *pid;
417 kuid_t uid; 423 kuid_t uid;
418 drm_magic_t magic; 424 drm_magic_t magic;
@@ -429,10 +435,8 @@ struct drm_file {
429 struct file *filp; 435 struct file *filp;
430 void *driver_priv; 436 void *driver_priv;
431 437
432 int is_master; /* this file private is a master for a minor */
433 struct drm_master *master; /* master this node is currently associated with 438 struct drm_master *master; /* master this node is currently associated with
434 N.B. not always minor->master */ 439 N.B. not always minor->master */
435
436 /** 440 /**
437 * fbs - List of framebuffers associated with this file. 441 * fbs - List of framebuffers associated with this file.
438 * 442 *
@@ -667,8 +671,6 @@ struct drm_gem_object {
667 uint32_t pending_read_domains; 671 uint32_t pending_read_domains;
668 uint32_t pending_write_domain; 672 uint32_t pending_write_domain;
669 673
670 void *driver_private;
671
672 /** 674 /**
673 * dma_buf - dma buf associated with this GEM object 675 * dma_buf - dma buf associated with this GEM object
674 * 676 *
@@ -834,12 +836,17 @@ struct drm_driver {
834 /** 836 /**
835 * Called by vblank timestamping code. 837 * Called by vblank timestamping code.
836 * 838 *
837 * Return the current display scanout position from a crtc. 839 * Return the current display scanout position from a crtc, and an
840 * optional accurate ktime_get timestamp of when position was measured.
838 * 841 *
839 * \param dev DRM device. 842 * \param dev DRM device.
840 * \param crtc Id of the crtc to query. 843 * \param crtc Id of the crtc to query.
841 * \param *vpos Target location for current vertical scanout position. 844 * \param *vpos Target location for current vertical scanout position.
842 * \param *hpos Target location for current horizontal scanout position. 845 * \param *hpos Target location for current horizontal scanout position.
846 * \param *stime Target location for timestamp taken immediately before
847 * scanout position query. Can be NULL to skip timestamp.
848 * \param *etime Target location for timestamp taken immediately after
849 * scanout position query. Can be NULL to skip timestamp.
843 * 850 *
844 * Returns vpos as a positive number while in active scanout area. 851 * Returns vpos as a positive number while in active scanout area.
845 * Returns vpos as a negative number inside vblank, counting the number 852 * Returns vpos as a negative number inside vblank, counting the number
@@ -856,7 +863,8 @@ struct drm_driver {
856 * 863 *
857 */ 864 */
858 int (*get_scanout_position) (struct drm_device *dev, int crtc, 865 int (*get_scanout_position) (struct drm_device *dev, int crtc,
859 int *vpos, int *hpos); 866 int *vpos, int *hpos, ktime_t *stime,
867 ktime_t *etime);
860 868
861 /** 869 /**
862 * Called by \c drm_get_last_vbltimestamp. Should return a precise 870 * Called by \c drm_get_last_vbltimestamp. Should return a precise
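
A driver implementing the extended hook brackets its hardware scanout read with the two optional timestamps. A sketch with an invented my_hw_read_scanout() helper; the DRM_SCANOUTPOS_* return flags follow the convention existing implementations use:

static int my_get_scanout_position(struct drm_device *dev, int crtc,
				   int *vpos, int *hpos,
				   ktime_t *stime, ktime_t *etime)
{
	if (stime)
		*stime = ktime_get();

	my_hw_read_scanout(dev, crtc, vpos, hpos);	/* hypothetical hw read */

	if (etime)
		*etime = ktime_get();

	return DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
}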
@@ -922,7 +930,6 @@ struct drm_driver {
922 * 930 *
923 * Returns 0 on success. 931 * Returns 0 on success.
924 */ 932 */
925 int (*gem_init_object) (struct drm_gem_object *obj);
926 void (*gem_free_object) (struct drm_gem_object *obj); 933 void (*gem_free_object) (struct drm_gem_object *obj);
927 int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); 934 int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
928 void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); 935 void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
@@ -997,27 +1004,6 @@ struct drm_driver {
997#define DRM_MINOR_CONTROL 2 1004#define DRM_MINOR_CONTROL 2
998#define DRM_MINOR_RENDER 3 1005#define DRM_MINOR_RENDER 3
999 1006
1000
1001/**
1002 * debugfs node list. This structure represents a debugfs file to
1003 * be created by the drm core
1004 */
1005struct drm_debugfs_list {
1006 const char *name; /** file name */
1007 int (*show)(struct seq_file*, void*); /** show callback */
1008 u32 driver_features; /**< Required driver features for this entry */
1009};
1010
1011/**
1012 * debugfs node structure. This structure represents a debugfs file.
1013 */
1014struct drm_debugfs_node {
1015 struct list_head list;
1016 struct drm_minor *minor;
1017 struct drm_debugfs_list *debugfs_ent;
1018 struct dentry *dent;
1019};
1020
1021/** 1007/**
1022 * Info file list entry. This structure represents a debugfs or proc file to 1008 * Info file list entry. This structure represents a debugfs or proc file to
1023 * be created by the drm core 1009 * be created by the drm core
@@ -1046,7 +1032,7 @@ struct drm_minor {
1046 int index; /**< Minor device number */ 1032 int index; /**< Minor device number */
1047 int type; /**< Control or render */ 1033 int type; /**< Control or render */
1048 dev_t device; /**< Device number for mknod */ 1034 dev_t device; /**< Device number for mknod */
1049 struct device kdev; /**< Linux device */ 1035 struct device *kdev; /**< Linux device */
1050 struct drm_device *dev; 1036 struct drm_device *dev;
1051 1037
1052 struct dentry *debugfs_root; 1038 struct dentry *debugfs_root;
@@ -1081,6 +1067,19 @@ struct drm_pending_vblank_event {
1081 struct drm_event_vblank event; 1067 struct drm_event_vblank event;
1082}; 1068};
1083 1069
1070struct drm_vblank_crtc {
1071 wait_queue_head_t queue; /**< VBLANK wait queue */
1072 struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */
1073 atomic_t count; /**< number of VBLANK interrupts */
1074 atomic_t refcount; /* number of users of vblank interrupts per crtc */
1075 u32 last; /* protected by dev->vbl_lock, used */
1076 /* for wraparound handling */
1077 u32 last_wait; /* Last vblank seqno waited per CRTC */
1078 unsigned int inmodeset; /* Display driver is setting mode */
1079 bool enabled; /* so we don't call enable more than
1080 once per disable */
1081};
1082
1084/** 1083/**
1085 * DRM device structure. This structure represent a complete card that 1084 * DRM device structure. This structure represent a complete card that
1086 * may contain multiple heads. 1085 * may contain multiple heads.
@@ -1105,25 +1104,16 @@ struct drm_device {
1105 atomic_t buf_alloc; /**< Buffer allocation in progress */ 1104 atomic_t buf_alloc; /**< Buffer allocation in progress */
1106 /*@} */ 1105 /*@} */
1107 1106
1108 /** \name Performance counters */
1109 /*@{ */
1110 unsigned long counters;
1111 enum drm_stat_type types[15];
1112 atomic_t counts[15];
1113 /*@} */
1114
1115 struct list_head filelist; 1107 struct list_head filelist;
1116 1108
1117 /** \name Memory management */ 1109 /** \name Memory management */
1118 /*@{ */ 1110 /*@{ */
1119 struct list_head maplist; /**< Linked list of regions */ 1111 struct list_head maplist; /**< Linked list of regions */
1120 int map_count; /**< Number of mappable regions */
1121 struct drm_open_hash map_hash; /**< User token hash table for maps */ 1112 struct drm_open_hash map_hash; /**< User token hash table for maps */
1122 1113
1123 /** \name Context handle management */ 1114 /** \name Context handle management */
1124 /*@{ */ 1115 /*@{ */
1125 struct list_head ctxlist; /**< Linked list of context handles */ 1116 struct list_head ctxlist; /**< Linked list of context handles */
1126 int ctx_count; /**< Number of context handles */
1127 struct mutex ctxlist_mutex; /**< For ctxlist */ 1117 struct mutex ctxlist_mutex; /**< For ctxlist */
1128 1118
1129 struct idr ctx_idr; 1119 struct idr ctx_idr;
@@ -1139,12 +1129,11 @@ struct drm_device {
1139 1129
1140 /** \name Context support */ 1130 /** \name Context support */
1141 /*@{ */ 1131 /*@{ */
1142 int irq_enabled; /**< True if irq handler is enabled */ 1132 bool irq_enabled; /**< True if irq handler is enabled */
1143 __volatile__ long context_flag; /**< Context swapping flag */ 1133 __volatile__ long context_flag; /**< Context swapping flag */
1144 int last_context; /**< Last current context */ 1134 int last_context; /**< Last current context */
1145 /*@} */ 1135 /*@} */
1146 1136
1147 struct work_struct work;
1148 /** \name VBLANK IRQ support */ 1137 /** \name VBLANK IRQ support */
1149 /*@{ */ 1138 /*@{ */
1150 1139
@@ -1154,20 +1143,13 @@ struct drm_device {
1154 * Once the modeset ioctl *has* been called though, we can safely 1143 * Once the modeset ioctl *has* been called though, we can safely
1155 * disable them when unused. 1144 * disable them when unused.
1156 */ 1145 */
1157 int vblank_disable_allowed; 1146 bool vblank_disable_allowed;
1147
1148 /* array of size num_crtcs */
1149 struct drm_vblank_crtc *vblank;
1158 1150
1159 wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
1160 atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
1161 struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
1162 spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ 1151 spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
1163 spinlock_t vbl_lock; 1152 spinlock_t vbl_lock;
1164 atomic_t *vblank_refcount; /* number of users of vblank interruptsper crtc */
1165 u32 *last_vblank; /* protected by dev->vbl_lock, used */
1166 /* for wraparound handling */
1167 int *vblank_enabled; /* so we don't call enable more than
1168 once per disable */
1169 int *vblank_inmodeset; /* Display driver is setting mode */
1170 u32 *last_vblank_wait; /* Last vblank seqno waited per CRTC */
1171 struct timer_list vblank_disable_timer; 1153 struct timer_list vblank_disable_timer;
1172 1154
1173 u32 max_vblank_count; /**< size of vblank counter register */ 1155 u32 max_vblank_count; /**< size of vblank counter register */
@@ -1184,8 +1166,6 @@ struct drm_device {
1184 1166
1185 struct device *dev; /**< Device structure */ 1167 struct device *dev; /**< Device structure */
1186 struct pci_dev *pdev; /**< PCI device structure */ 1168 struct pci_dev *pdev; /**< PCI device structure */
1187 int pci_vendor; /**< PCI vendor id */
1188 int pci_device; /**< PCI device id */
1189#ifdef __alpha__ 1169#ifdef __alpha__
1190 struct pci_controller *hose; 1170 struct pci_controller *hose;
1191#endif 1171#endif
@@ -1303,6 +1283,8 @@ extern int drm_getstats(struct drm_device *dev, void *data,
1303 struct drm_file *file_priv); 1283 struct drm_file *file_priv);
1304extern int drm_getcap(struct drm_device *dev, void *data, 1284extern int drm_getcap(struct drm_device *dev, void *data,
1305 struct drm_file *file_priv); 1285 struct drm_file *file_priv);
1286extern int drm_setclientcap(struct drm_device *dev, void *data,
1287 struct drm_file *file_priv);
1306extern int drm_setversion(struct drm_device *dev, void *data, 1288extern int drm_setversion(struct drm_device *dev, void *data,
1307 struct drm_file *file_priv); 1289 struct drm_file *file_priv);
1308extern int drm_noop(struct drm_device *dev, void *data, 1290extern int drm_noop(struct drm_device *dev, void *data,
@@ -1454,7 +1436,6 @@ extern struct drm_master *drm_master_get(struct drm_master *master);
1454extern void drm_master_put(struct drm_master **master); 1436extern void drm_master_put(struct drm_master **master);
1455 1437
1456extern void drm_put_dev(struct drm_device *dev); 1438extern void drm_put_dev(struct drm_device *dev);
1457extern int drm_put_minor(struct drm_minor **minor);
1458extern void drm_unplug_dev(struct drm_device *dev); 1439extern void drm_unplug_dev(struct drm_device *dev);
1459extern unsigned int drm_debug; 1440extern unsigned int drm_debug;
1460extern unsigned int drm_rnodes; 1441extern unsigned int drm_rnodes;
@@ -1474,10 +1455,11 @@ extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
1474#if defined(CONFIG_DEBUG_FS) 1455#if defined(CONFIG_DEBUG_FS)
1475extern int drm_debugfs_init(struct drm_minor *minor, int minor_id, 1456extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
1476 struct dentry *root); 1457 struct dentry *root);
1477extern int drm_debugfs_create_files(struct drm_info_list *files, int count, 1458extern int drm_debugfs_create_files(const struct drm_info_list *files,
1478 struct dentry *root, struct drm_minor *minor); 1459 int count, struct dentry *root,
1479extern int drm_debugfs_remove_files(struct drm_info_list *files, int count, 1460 struct drm_minor *minor);
1480 struct drm_minor *minor); 1461extern int drm_debugfs_remove_files(const struct drm_info_list *files,
1462 int count, struct drm_minor *minor);
1481extern int drm_debugfs_cleanup(struct drm_minor *minor); 1463extern int drm_debugfs_cleanup(struct drm_minor *minor);
1482#endif 1464#endif
1483 1465
@@ -1556,8 +1538,6 @@ int drm_gem_init(struct drm_device *dev);
1556void drm_gem_destroy(struct drm_device *dev); 1538void drm_gem_destroy(struct drm_device *dev);
1557void drm_gem_object_release(struct drm_gem_object *obj); 1539void drm_gem_object_release(struct drm_gem_object *obj);
1558void drm_gem_object_free(struct kref *kref); 1540void drm_gem_object_free(struct kref *kref);
1559struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
1560 size_t size);
1561int drm_gem_object_init(struct drm_device *dev, 1541int drm_gem_object_init(struct drm_device *dev,
1562 struct drm_gem_object *obj, size_t size); 1542 struct drm_gem_object *obj, size_t size);
1563void drm_gem_private_object_init(struct drm_device *dev, 1543void drm_gem_private_object_init(struct drm_device *dev,
@@ -1645,10 +1625,11 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
1645 1625
1646#include <drm/drm_mem_util.h> 1626#include <drm/drm_mem_util.h>
1647 1627
1648extern int drm_fill_in_dev(struct drm_device *dev, 1628struct drm_device *drm_dev_alloc(struct drm_driver *driver,
1649 const struct pci_device_id *ent, 1629 struct device *parent);
1650 struct drm_driver *driver); 1630void drm_dev_free(struct drm_device *dev);
1651int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type); 1631int drm_dev_register(struct drm_device *dev, unsigned long flags);
1632void drm_dev_unregister(struct drm_device *dev);
1652/*@}*/ 1633/*@}*/
1653 1634
1654/* PCI section */ 1635/* PCI section */
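
The new drm_dev_alloc()/drm_dev_register() pair replaces drm_fill_in_dev() and drm_get_minor() with a two-step bring-up. A hedged sketch of a platform probe built on it (my_driver and the error paths are assumptions, not code from this patch):

static int my_platform_probe(struct platform_device *pdev)
{
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&my_driver, &pdev->dev);
	if (!drm)
		return -ENOMEM;

	ret = drm_dev_register(drm, 0);
	if (ret) {
		drm_dev_free(drm);
		return ret;
	}

	platform_set_drvdata(pdev, drm);
	return 0;
}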
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 24f499569a2f..f32c5cd51f41 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -108,6 +108,7 @@ enum drm_mode_status {
108 MODE_ONE_HEIGHT, /* only one height is supported */ 108 MODE_ONE_HEIGHT, /* only one height is supported */
109 MODE_ONE_SIZE, /* only one resolution is supported */ 109 MODE_ONE_SIZE, /* only one resolution is supported */
110 MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */ 110 MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
111 MODE_NO_STEREO, /* stereo modes not supported */
111 MODE_UNVERIFIED = -3, /* mode needs to be reverified */ 112 MODE_UNVERIFIED = -3, /* mode needs to be reverified */
112 MODE_BAD = -2, /* unspecified reason */ 113 MODE_BAD = -2, /* unspecified reason */
113 MODE_ERROR = -1 /* error condition */ 114 MODE_ERROR = -1 /* error condition */
@@ -124,7 +125,10 @@ enum drm_mode_status {
124 .vscan = (vs), .flags = (f), \ 125 .vscan = (vs), .flags = (f), \
125 .base.type = DRM_MODE_OBJECT_MODE 126 .base.type = DRM_MODE_OBJECT_MODE
126 127
127#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */ 128#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
129#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
130
131#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
128 132
129struct drm_display_mode { 133struct drm_display_mode {
130 /* Header */ 134 /* Header */
@@ -155,8 +159,7 @@ struct drm_display_mode {
155 int height_mm; 159 int height_mm;
156 160
157 /* Actual mode we give to hw */ 161 /* Actual mode we give to hw */
158 int clock_index; 162 int crtc_clock; /* in KHz */
159 int synth_clock;
160 int crtc_hdisplay; 163 int crtc_hdisplay;
161 int crtc_hblank_start; 164 int crtc_hblank_start;
162 int crtc_hblank_end; 165 int crtc_hblank_end;
@@ -180,6 +183,11 @@ struct drm_display_mode {
180 int hsync; /* in kHz */ 183 int hsync; /* in kHz */
181}; 184};
182 185
186static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
187{
188 return mode->flags & DRM_MODE_FLAG_3D_MASK;
189}
190
183enum drm_connector_status { 191enum drm_connector_status {
184 connector_status_connected = 1, 192 connector_status_connected = 1,
185 connector_status_disconnected = 2, 193 connector_status_disconnected = 2,
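
Together with the MODE_NO_STEREO status above, drm_mode_is_stereo() lets a connector reject stereo modes it cannot drive. An illustrative mode_valid hook (the callback wiring is assumed):

static enum drm_mode_status
my_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
{
	/* stereo_allowed is the new per-connector opt-in flag */
	if (drm_mode_is_stereo(mode) && !connector->stereo_allowed)
		return MODE_NO_STEREO;

	return MODE_OK;
}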
@@ -587,7 +595,7 @@ enum drm_connector_force {
587 */ 595 */
588struct drm_connector { 596struct drm_connector {
589 struct drm_device *dev; 597 struct drm_device *dev;
590 struct device kdev; 598 struct device *kdev;
591 struct device_attribute *attr; 599 struct device_attribute *attr;
592 struct list_head head; 600 struct list_head head;
593 601
@@ -597,6 +605,7 @@ struct drm_connector {
597 int connector_type_id; 605 int connector_type_id;
598 bool interlace_allowed; 606 bool interlace_allowed;
599 bool doublescan_allowed; 607 bool doublescan_allowed;
608 bool stereo_allowed;
600 struct list_head modes; /* list of modes on this connector */ 609 struct list_head modes; /* list of modes on this connector */
601 610
602 enum drm_connector_status status; 611 enum drm_connector_status status;
@@ -964,6 +973,7 @@ extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_m
964extern bool drm_probe_ddc(struct i2c_adapter *adapter); 973extern bool drm_probe_ddc(struct i2c_adapter *adapter);
965extern struct edid *drm_get_edid(struct drm_connector *connector, 974extern struct edid *drm_get_edid(struct drm_connector *connector,
966 struct i2c_adapter *adapter); 975 struct i2c_adapter *adapter);
976extern struct edid *drm_edid_duplicate(const struct edid *edid);
967extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); 977extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
968extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); 978extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
969extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); 979extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
@@ -975,7 +985,7 @@ extern void drm_mode_config_reset(struct drm_device *dev);
975extern void drm_mode_config_cleanup(struct drm_device *dev); 985extern void drm_mode_config_cleanup(struct drm_device *dev);
976extern void drm_mode_set_name(struct drm_display_mode *mode); 986extern void drm_mode_set_name(struct drm_display_mode *mode);
977extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); 987extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
978extern bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); 988extern bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
979extern int drm_mode_width(const struct drm_display_mode *mode); 989extern int drm_mode_width(const struct drm_display_mode *mode);
980extern int drm_mode_height(const struct drm_display_mode *mode); 990extern int drm_mode_height(const struct drm_display_mode *mode);
981 991
@@ -1108,6 +1118,8 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
1108 int GTF_2C, int GTF_K, int GTF_2J); 1118 int GTF_2C, int GTF_K, int GTF_2J);
1109extern int drm_add_modes_noedid(struct drm_connector *connector, 1119extern int drm_add_modes_noedid(struct drm_connector *connector,
1110 int hdisplay, int vdisplay); 1120 int hdisplay, int vdisplay);
1121extern void drm_set_preferred_mode(struct drm_connector *connector,
1122 int hpref, int vpref);
1111 1123
1112extern int drm_edid_header_is_valid(const u8 *raw_edid); 1124extern int drm_edid_header_is_valid(const u8 *raw_edid);
1113extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid); 1125extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
@@ -1135,4 +1147,21 @@ extern int drm_format_horz_chroma_subsampling(uint32_t format);
1135extern int drm_format_vert_chroma_subsampling(uint32_t format); 1147extern int drm_format_vert_chroma_subsampling(uint32_t format);
1136extern const char *drm_get_format_name(uint32_t format); 1148extern const char *drm_get_format_name(uint32_t format);
1137 1149
1150/* Helpers */
1151static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
1152 uint32_t id)
1153{
1154 struct drm_mode_object *mo;
1155 mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
1156 return mo ? obj_to_crtc(mo) : NULL;
1157}
1158
1159static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
1160 uint32_t id)
1161{
1162 struct drm_mode_object *mo;
1163 mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
1164 return mo ? obj_to_encoder(mo) : NULL;
1165}
1166
1138#endif /* __DRM_CRTC_H__ */ 1167#endif /* __DRM_CRTC_H__ */
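
The two lookup helpers fold the common drm_mode_object_find() plus cast sequence into a single call. Typical use inside an ioctl handler (fragment; crtc_id comes from userspace):

struct drm_crtc *crtc = drm_crtc_find(dev, crtc_id);

if (!crtc)
	return -ENOENT;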
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index f43d556bf40b..ef6ad3a8e58e 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -163,7 +163,7 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,
163extern int drm_helper_resume_force_mode(struct drm_device *dev); 163extern int drm_helper_resume_force_mode(struct drm_device *dev);
164extern void drm_kms_helper_poll_init(struct drm_device *dev); 164extern void drm_kms_helper_poll_init(struct drm_device *dev);
165extern void drm_kms_helper_poll_fini(struct drm_device *dev); 165extern void drm_kms_helper_poll_fini(struct drm_device *dev);
166extern void drm_helper_hpd_irq_event(struct drm_device *dev); 166extern bool drm_helper_hpd_irq_event(struct drm_device *dev);
167extern void drm_kms_helper_hotplug_event(struct drm_device *dev); 167extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
168 168
169extern void drm_kms_helper_poll_disable(struct drm_device *dev); 169extern void drm_kms_helper_poll_disable(struct drm_device *dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index ae8dbfb1207c..a92c3754e3bb 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -77,10 +77,10 @@
77#define DP_DOWNSTREAMPORT_PRESENT 0x005 77#define DP_DOWNSTREAMPORT_PRESENT 0x005
78# define DP_DWN_STRM_PORT_PRESENT (1 << 0) 78# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
79# define DP_DWN_STRM_PORT_TYPE_MASK 0x06 79# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
80/* 00b = DisplayPort */ 80# define DP_DWN_STRM_PORT_TYPE_DP (0 << 1)
81/* 01b = Analog */ 81# define DP_DWN_STRM_PORT_TYPE_ANALOG (1 << 1)
82/* 10b = TMDS or HDMI */ 82# define DP_DWN_STRM_PORT_TYPE_TMDS (2 << 1)
83/* 11b = Other */ 83# define DP_DWN_STRM_PORT_TYPE_OTHER (3 << 1)
84# define DP_FORMAT_CONVERSION (1 << 3) 84# define DP_FORMAT_CONVERSION (1 << 3)
85# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */ 85# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
86 86
@@ -333,20 +333,20 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
333 333
334 334
335#define DP_LINK_STATUS_SIZE 6 335#define DP_LINK_STATUS_SIZE 6
336bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], 336bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
337 int lane_count); 337 int lane_count);
338bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], 338bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
339 int lane_count); 339 int lane_count);
340u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], 340u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
341 int lane); 341 int lane);
342u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], 342u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
343 int lane); 343 int lane);
344 344
345#define DP_RECEIVER_CAP_SIZE 0xf 345#define DP_RECEIVER_CAP_SIZE 0xf
346#define EDP_PSR_RECEIVER_CAP_SIZE 2 346#define EDP_PSR_RECEIVER_CAP_SIZE 2
347 347
348void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); 348void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
349void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); 349void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
350 350
351u8 drm_dp_link_rate_to_bw_code(int link_rate); 351u8 drm_dp_link_rate_to_bw_code(int link_rate);
352int drm_dp_bw_code_to_link_rate(u8 link_bw); 352int drm_dp_bw_code_to_link_rate(u8 link_bw);
@@ -379,15 +379,22 @@ struct edp_vsc_psr {
379#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2) 379#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2)
380 380
381static inline int 381static inline int
382drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE]) 382drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
383{ 383{
384 return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]); 384 return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
385} 385}
386 386
387static inline u8 387static inline u8
388drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE]) 388drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
389{ 389{
390 return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; 390 return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
391} 391}
392 392
393static inline bool
394drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
395{
396 return dpcd[DP_DPCD_REV] >= 0x11 &&
397 (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
398}
399
393#endif /* _DRM_DP_HELPER_H_ */ 400#endif /* _DRM_DP_HELPER_H_ */
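
With the DPCD helpers now taking const arrays, a driver can derive its link configuration straight from a cached receiver-capability block. A sketch against an invented my_dp_link structure:

struct my_dp_link {
	int rate;
	u8 lanes;
	bool enhanced_framing;
};

static void my_link_init(struct my_dp_link *link,
			 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	link->rate = drm_dp_max_link_rate(dpcd);
	link->lanes = drm_dp_max_lane_count(dpcd);
	link->enhanced_framing = drm_dp_enhanced_frame_cap(dpcd);
}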
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 3d79e513c0b3..87578c109e48 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -261,6 +261,18 @@
261 {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 261 {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
262 {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 262 {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
263 {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 263 {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
264 {0x1002, 0x67A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
265 {0x1002, 0x67A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
266 {0x1002, 0x67A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
267 {0x1002, 0x67A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
268 {0x1002, 0x67A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
269 {0x1002, 0x67AA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
270 {0x1002, 0x67B0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
271 {0x1002, 0x67B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
272 {0x1002, 0x67B8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
273 {0x1002, 0x67B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
274 {0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
275 {0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
264 {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 276 {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
265 {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 277 {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
266 {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 278 {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 3abfa6ea226e..97d5497debc1 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -49,6 +49,10 @@ extern bool i915_gpu_turbo_disable(void);
49#define SNB_GMCH_GGMS_MASK 0x3 49#define SNB_GMCH_GGMS_MASK 0x3
50#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ 50#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
51#define SNB_GMCH_GMS_MASK 0x1f 51#define SNB_GMCH_GMS_MASK 0x1f
52#define BDW_GMCH_GGMS_SHIFT 6
53#define BDW_GMCH_GGMS_MASK 0x3
54#define BDW_GMCH_GMS_SHIFT 8
55#define BDW_GMCH_GMS_MASK 0xff
52 56
53#define I830_GMCH_CTRL 0x52 57#define I830_GMCH_CTRL 0x52
54 58
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 8a10f5c354e6..940ece4934ba 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -208,4 +208,29 @@
208#define INTEL_VLV_D_IDS(info) \ 208#define INTEL_VLV_D_IDS(info) \
209 INTEL_VGA_DEVICE(0x0155, info) 209 INTEL_VGA_DEVICE(0x0155, info)
210 210
211#define _INTEL_BDW_M(gt, id, info) \
212 INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
213#define _INTEL_BDW_D(gt, id, info) \
214 INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
215
216#define _INTEL_BDW_M_IDS(gt, info) \
217 _INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \
218 _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
219 _INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \
220 _INTEL_BDW_M(gt, 0x160E, info) /* ULX */
221
222#define _INTEL_BDW_D_IDS(gt, info) \
223 _INTEL_BDW_D(gt, 0x160A, info), /* Server */ \
224 _INTEL_BDW_D(gt, 0x160D, info) /* Workstation */
225
226#define INTEL_BDW_M_IDS(info) \
227 _INTEL_BDW_M_IDS(1, info), \
228 _INTEL_BDW_M_IDS(2, info), \
229 _INTEL_BDW_M_IDS(3, info)
230
231#define INTEL_BDW_D_IDS(info) \
232 _INTEL_BDW_D_IDS(1, info), \
233 _INTEL_BDW_D_IDS(2, info), \
234 _INTEL_BDW_D_IDS(3, info)
235
211#endif /* _I915_PCIIDS_H */ 236#endif /* _I915_PCIIDS_H */
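
The GT level lands in bits 4-5 of the device ID, which is why the macros shift (gt - 1) left by 4 before OR-ing in the base ID. How a PCI ID table might consume them (the info pointers are placeholders, not i915's real tables):

static const struct pci_device_id my_pciidlist[] = {
	INTEL_BDW_M_IDS(&my_bdw_mobile_info),	/* assumed driver data */
	INTEL_BDW_D_IDS(&my_bdw_desktop_info),
	{ }
};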
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 706b962c6467..d1f61bfe0ebe 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -62,7 +62,7 @@ extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
62extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data); 62extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
63 63
64 64
65#ifdef CONFIG_SWIOTLB 65#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
66/** 66/**
67 * Initialize pool allocator. 67 * Initialize pool allocator.
68 */ 68 */
@@ -94,6 +94,15 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
94{ 94{
95 return 0; 95 return 0;
96} 96}
97static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
98 struct device *dev)
99{
100 return -ENOMEM;
101}
102static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
103 struct device *dev)
104{
105}
97#endif 106#endif
98 107
99#endif 108#endif
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 5bd6ab9b0c27..dc196bbcf227 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -107,8 +107,16 @@ struct cpufreq_policy {
107#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */ 107#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
108#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ 108#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
109 109
110#ifdef CONFIG_CPU_FREQ
110struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); 111struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
111void cpufreq_cpu_put(struct cpufreq_policy *policy); 112void cpufreq_cpu_put(struct cpufreq_policy *policy);
113#else
114static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
115{
116 return NULL;
117}
118static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
119#endif
112 120
113static inline bool policy_is_shared(struct cpufreq_policy *policy) 121static inline bool policy_is_shared(struct cpufreq_policy *policy)
114{ 122{
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
new file mode 100644
index 000000000000..f5b9b87ac9a9
--- /dev/null
+++ b/include/linux/host1x.h
@@ -0,0 +1,284 @@
1/*
2 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __LINUX_HOST1X_H
20#define __LINUX_HOST1X_H
21
22#include <linux/device.h>
23#include <linux/types.h>
24
25enum host1x_class {
26 HOST1X_CLASS_HOST1X = 0x1,
27 HOST1X_CLASS_GR2D = 0x51,
28 HOST1X_CLASS_GR2D_SB = 0x52,
29 HOST1X_CLASS_GR3D = 0x60,
30};
31
32struct host1x_client;
33
34struct host1x_client_ops {
35 int (*init)(struct host1x_client *client);
36 int (*exit)(struct host1x_client *client);
37};
38
39struct host1x_client {
40 struct list_head list;
41 struct device *parent;
42 struct device *dev;
43
44 const struct host1x_client_ops *ops;
45
46 enum host1x_class class;
47 struct host1x_channel *channel;
48
49 struct host1x_syncpt **syncpts;
50 unsigned int num_syncpts;
51};
52
53/*
54 * host1x buffer objects
55 */
56
57struct host1x_bo;
58struct sg_table;
59
60struct host1x_bo_ops {
61 struct host1x_bo *(*get)(struct host1x_bo *bo);
62 void (*put)(struct host1x_bo *bo);
63 dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
64 void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
65 void *(*mmap)(struct host1x_bo *bo);
66 void (*munmap)(struct host1x_bo *bo, void *addr);
67 void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
68 void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
69};
70
71struct host1x_bo {
72 const struct host1x_bo_ops *ops;
73};
74
75static inline void host1x_bo_init(struct host1x_bo *bo,
76 const struct host1x_bo_ops *ops)
77{
78 bo->ops = ops;
79}
80
81static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
82{
83 return bo->ops->get(bo);
84}
85
86static inline void host1x_bo_put(struct host1x_bo *bo)
87{
88 bo->ops->put(bo);
89}
90
91static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
92 struct sg_table **sgt)
93{
94 return bo->ops->pin(bo, sgt);
95}
96
97static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
98{
99 bo->ops->unpin(bo, sgt);
100}
101
102static inline void *host1x_bo_mmap(struct host1x_bo *bo)
103{
104 return bo->ops->mmap(bo);
105}
106
107static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
108{
109 bo->ops->munmap(bo, addr);
110}
111
112static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
113{
114 return bo->ops->kmap(bo, pagenum);
115}
116
117static inline void host1x_bo_kunmap(struct host1x_bo *bo,
118 unsigned int pagenum, void *addr)
119{
120 bo->ops->kunmap(bo, pagenum, addr);
121}
122
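A driver embeds struct host1x_bo in its own buffer object and recovers it with container_of(); host1x_bo_init() wires in the ops table. A minimal sketch (my_bo and my_bo_ops are invented):

struct my_bo {
	struct host1x_bo base;
	/* driver-specific fields ... */
};

static inline struct my_bo *to_my_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct my_bo, base);
}

/* at creation time, assuming a my_bo_ops table defined elsewhere: */
/*	host1x_bo_init(&bo->base, &my_bo_ops); */
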
123/*
124 * host1x syncpoints
125 */
126
127#define HOST1X_SYNCPT_CLIENT_MANAGED (1 << 0)
128#define HOST1X_SYNCPT_HAS_BASE (1 << 1)
129
130struct host1x_syncpt_base;
131struct host1x_syncpt;
132struct host1x;
133
134struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
135u32 host1x_syncpt_id(struct host1x_syncpt *sp);
136u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
137u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
138int host1x_syncpt_incr(struct host1x_syncpt *sp);
139int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
140 u32 *value);
141struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
142 unsigned long flags);
143void host1x_syncpt_free(struct host1x_syncpt *sp);
144
145struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
146u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
147
148/*
149 * host1x channel
150 */
151
152struct host1x_channel;
153struct host1x_job;
154
155struct host1x_channel *host1x_channel_request(struct device *dev);
156void host1x_channel_free(struct host1x_channel *channel);
157struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
158void host1x_channel_put(struct host1x_channel *channel);
159int host1x_job_submit(struct host1x_job *job);
160
161/*
162 * host1x job
163 */
164
165struct host1x_reloc {
166 struct host1x_bo *cmdbuf;
167 u32 cmdbuf_offset;
168 struct host1x_bo *target;
169 u32 target_offset;
170 u32 shift;
171 u32 pad;
172};
173
174struct host1x_job {
175 /* When refcount goes to zero, job can be freed */
176 struct kref ref;
177
178 /* List entry */
179 struct list_head list;
180
181 /* Channel where job is submitted to */
182 struct host1x_channel *channel;
183
184 u32 client;
185
186 /* Gathers and their memory */
187 struct host1x_job_gather *gathers;
188 unsigned int num_gathers;
189
190 /* Wait checks to be processed at submit time */
191 struct host1x_waitchk *waitchk;
192 unsigned int num_waitchk;
193 u32 waitchk_mask;
194
195 /* Array of handles to be pinned & unpinned */
196 struct host1x_reloc *relocarray;
197 unsigned int num_relocs;
198 struct host1x_job_unpin_data *unpins;
199 unsigned int num_unpins;
200
201 dma_addr_t *addr_phys;
202 dma_addr_t *gather_addr_phys;
203 dma_addr_t *reloc_addr_phys;
204
205 /* Sync point id, number of increments and end related to the submit */
206 u32 syncpt_id;
207 u32 syncpt_incrs;
208 u32 syncpt_end;
209
210 /* Maximum time to wait for this job */
211 unsigned int timeout;
212
213 /* Index and number of slots used in the push buffer */
214 unsigned int first_get;
215 unsigned int num_slots;
216
217 /* Copy of gathers */
218 size_t gather_copy_size;
219 dma_addr_t gather_copy;
220 u8 *gather_copy_mapped;
221
222 /* Check if register is marked as an address reg */
223 int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
224
225 /* Request a SETCLASS to this class */
226 u32 class;
227
228 /* Add a channel wait for previous ops to complete */
229 bool serialize;
230};
231
232struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
233 u32 num_cmdbufs, u32 num_relocs,
234 u32 num_waitchks);
235void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
236 u32 words, u32 offset);
237struct host1x_job *host1x_job_get(struct host1x_job *job);
238void host1x_job_put(struct host1x_job *job);
239int host1x_job_pin(struct host1x_job *job, struct device *dev);
240void host1x_job_unpin(struct host1x_job *job);
241
242/*
243 * subdevice probe infrastructure
244 */
245
246struct host1x_device;
247
248struct host1x_driver {
249 const struct of_device_id *subdevs;
250 struct list_head list;
251 const char *name;
252
253 int (*probe)(struct host1x_device *device);
254 int (*remove)(struct host1x_device *device);
255};
256
257int host1x_driver_register(struct host1x_driver *driver);
258void host1x_driver_unregister(struct host1x_driver *driver);
259
260struct host1x_device {
261 struct host1x_driver *driver;
262 struct list_head list;
263 struct device dev;
264
265 struct mutex subdevs_lock;
266 struct list_head subdevs;
267 struct list_head active;
268
269 struct mutex clients_lock;
270 struct list_head clients;
271};
272
273static inline struct host1x_device *to_host1x_device(struct device *dev)
274{
275 return container_of(dev, struct host1x_device, dev);
276}
277
278int host1x_device_init(struct host1x_device *device);
279int host1x_device_exit(struct host1x_device *device);
280
281int host1x_client_register(struct host1x_client *client);
282int host1x_client_unregister(struct host1x_client *client);
283
284#endif
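
A subdevice driver participates in this infrastructure by filling in a host1x_client and registering it; the init/exit callbacks run once the whole logical device is assembled. An illustrative probe (all my_* names are assumptions):

static int my_client_init(struct host1x_client *client)
{
	/* acquire channels and sync points here */
	return 0;
}

static int my_client_exit(struct host1x_client *client)
{
	return 0;
}

static const struct host1x_client_ops my_client_ops = {
	.init = my_client_init,
	.exit = my_client_exit,
};

static int my_probe(struct platform_device *pdev)
{
	struct host1x_client *client;

	client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	client->ops = &my_client_ops;
	client->dev = &pdev->dev;

	return host1x_client_register(client);
}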
diff --git a/include/uapi/drm/armada_drm.h b/include/uapi/drm/armada_drm.h
new file mode 100644
index 000000000000..8dec3fdc99c7
--- /dev/null
+++ b/include/uapi/drm/armada_drm.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright (C) 2012 Russell King
3 * With inspiration from the i915 driver
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#ifndef DRM_ARMADA_IOCTL_H
10#define DRM_ARMADA_IOCTL_H
11
12#define DRM_ARMADA_GEM_CREATE 0x00
13#define DRM_ARMADA_GEM_MMAP 0x02
14#define DRM_ARMADA_GEM_PWRITE 0x03
15
16#define ARMADA_IOCTL(dir, name, str) \
17 DRM_##dir(DRM_COMMAND_BASE + DRM_ARMADA_##name, struct drm_armada_##str)
18
19struct drm_armada_gem_create {
20 uint32_t handle;
21 uint32_t size;
22};
23#define DRM_IOCTL_ARMADA_GEM_CREATE \
24 ARMADA_IOCTL(IOWR, GEM_CREATE, gem_create)
25
26struct drm_armada_gem_mmap {
27 uint32_t handle;
28 uint32_t pad;
29 uint64_t offset;
30 uint64_t size;
31 uint64_t addr;
32};
33#define DRM_IOCTL_ARMADA_GEM_MMAP \
34 ARMADA_IOCTL(IOWR, GEM_MMAP, gem_mmap)
35
36struct drm_armada_gem_pwrite {
37 uint64_t ptr;
38 uint32_t handle;
39 uint32_t offset;
40 uint32_t size;
41};
42#define DRM_IOCTL_ARMADA_GEM_PWRITE \
43 ARMADA_IOCTL(IOW, GEM_PWRITE, gem_pwrite)
44
45#endif
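
From userspace the flow is create, then ask the kernel to map: GEM_MMAP returns the mapping address in addr rather than requiring a separate mmap() call. A trimmed sketch (device path assumed, error handling omitted):

#include <fcntl.h>
#include <xf86drm.h>
#include <drm/armada_drm.h>

int armada_demo(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_armada_gem_create create = { .size = 4096 };
	struct drm_armada_gem_mmap map = { 0 };

	drmIoctl(fd, DRM_IOCTL_ARMADA_GEM_CREATE, &create);

	map.handle = create.handle;
	map.size = create.size;
	drmIoctl(fd, DRM_IOCTL_ARMADA_GEM_MMAP, &map);

	/* map.addr now holds the userspace mapping set up by the kernel */
	return 0;
}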
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index ece867889cc7..9b24d65fed72 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -611,12 +611,37 @@ struct drm_gem_open {
611 __u64 size; 611 __u64 size;
612}; 612};
613 613
614#define DRM_CAP_DUMB_BUFFER 0x1
615#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
616#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
617#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
618#define DRM_CAP_PRIME 0x5
619#define DRM_PRIME_CAP_IMPORT 0x1
620#define DRM_PRIME_CAP_EXPORT 0x2
621#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
622#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
623
614/** DRM_IOCTL_GET_CAP ioctl argument type */ 624/** DRM_IOCTL_GET_CAP ioctl argument type */
615struct drm_get_cap { 625struct drm_get_cap {
616 __u64 capability; 626 __u64 capability;
617 __u64 value; 627 __u64 value;
618}; 628};
619 629
630/**
631 * DRM_CLIENT_CAP_STEREO_3D
632 *
633 * If set to 1, the DRM core will expose the stereo 3D capabilities of the
634 * monitor by advertising the supported 3D layouts in the flags of struct
635 * drm_mode_modeinfo.
636 */
637#define DRM_CLIENT_CAP_STEREO_3D 1
638
639/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
640struct drm_set_client_cap {
641 __u64 capability;
642 __u64 value;
643};
644
620#define DRM_CLOEXEC O_CLOEXEC 645#define DRM_CLOEXEC O_CLOEXEC
621struct drm_prime_handle { 646struct drm_prime_handle {
622 __u32 handle; 647 __u32 handle;
@@ -649,6 +674,7 @@ struct drm_prime_handle {
649#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) 674#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
650#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) 675#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
651#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap) 676#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
677#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap)
652 678
653#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) 679#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
654#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) 680#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
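
A client opts in per file descriptor; the setting does not persist across opens. Sketch of enabling stereo mode reporting from userspace (fragment, fd assumed open):

struct drm_set_client_cap cap = {
	.capability = DRM_CLIENT_CAP_STEREO_3D,
	.value = 1,
};

if (drmIoctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap) < 0)
	fprintf(stderr, "stereo 3D mode flags not available\n");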
@@ -774,17 +800,6 @@ struct drm_event_vblank {
774 __u32 reserved; 800 __u32 reserved;
775}; 801};
776 802
777#define DRM_CAP_DUMB_BUFFER 0x1
778#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
779#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
780#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
781#define DRM_CAP_PRIME 0x5
782#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
783#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
784
785#define DRM_PRIME_CAP_IMPORT 0x1
786#define DRM_PRIME_CAP_EXPORT 0x2
787
788/* typedef area */ 803/* typedef area */
789#ifndef __KERNEL__ 804#ifndef __KERNEL__
790typedef struct drm_clip_rect drm_clip_rect_t; 805typedef struct drm_clip_rect drm_clip_rect_t;
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 28acbaf4a81e..f104c2603ebe 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -44,20 +44,35 @@
44 44
45/* Video mode flags */ 45/* Video mode flags */
46/* bit compatible with the xorg definitions. */ 46/* bit compatible with the xorg definitions. */
47#define DRM_MODE_FLAG_PHSYNC (1<<0) 47#define DRM_MODE_FLAG_PHSYNC (1<<0)
48#define DRM_MODE_FLAG_NHSYNC (1<<1) 48#define DRM_MODE_FLAG_NHSYNC (1<<1)
49#define DRM_MODE_FLAG_PVSYNC (1<<2) 49#define DRM_MODE_FLAG_PVSYNC (1<<2)
50#define DRM_MODE_FLAG_NVSYNC (1<<3) 50#define DRM_MODE_FLAG_NVSYNC (1<<3)
51#define DRM_MODE_FLAG_INTERLACE (1<<4) 51#define DRM_MODE_FLAG_INTERLACE (1<<4)
52#define DRM_MODE_FLAG_DBLSCAN (1<<5) 52#define DRM_MODE_FLAG_DBLSCAN (1<<5)
53#define DRM_MODE_FLAG_CSYNC (1<<6) 53#define DRM_MODE_FLAG_CSYNC (1<<6)
54#define DRM_MODE_FLAG_PCSYNC (1<<7) 54#define DRM_MODE_FLAG_PCSYNC (1<<7)
55#define DRM_MODE_FLAG_NCSYNC (1<<8) 55#define DRM_MODE_FLAG_NCSYNC (1<<8)
56#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */ 56#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
57#define DRM_MODE_FLAG_BCAST (1<<10) 57#define DRM_MODE_FLAG_BCAST (1<<10)
58#define DRM_MODE_FLAG_PIXMUX (1<<11) 58#define DRM_MODE_FLAG_PIXMUX (1<<11)
59#define DRM_MODE_FLAG_DBLCLK (1<<12) 59#define DRM_MODE_FLAG_DBLCLK (1<<12)
60#define DRM_MODE_FLAG_CLKDIV2 (1<<13) 60#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
61 /*
62 * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAG_3D_MAX
63 * (define not exposed to user space).
64 */
65#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
66#define DRM_MODE_FLAG_3D_NONE (0<<14)
67#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
68#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
69#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
70#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
71#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
72#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
73#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
74#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
75
61 76
62/* DPMS flags */ 77/* DPMS flags */
63/* bit compatible with the xorg definitions. */ 78/* bit compatible with the xorg definitions. */
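
The stereo layout occupies a dedicated 5-bit field starting at bit 14, so a client decodes it by masking rather than testing individual bits. An illustrative decoder covering a few of the layouts above:

static const char *stereo_layout_name(__u32 flags)
{
	switch (flags & DRM_MODE_FLAG_3D_MASK) {
	case DRM_MODE_FLAG_3D_FRAME_PACKING:
		return "frame packing";
	case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
		return "top and bottom";
	case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
		return "side by side (half)";
	case DRM_MODE_FLAG_3D_NONE:
	default:
		return "none";
	}
}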
@@ -165,6 +180,7 @@ struct drm_mode_get_plane_res {
165#define DRM_MODE_ENCODER_LVDS 3 180#define DRM_MODE_ENCODER_LVDS 3
166#define DRM_MODE_ENCODER_TVDAC 4 181#define DRM_MODE_ENCODER_TVDAC 4
167#define DRM_MODE_ENCODER_VIRTUAL 5 182#define DRM_MODE_ENCODER_VIRTUAL 5
183#define DRM_MODE_ENCODER_DSI 6
168 184
169struct drm_mode_get_encoder { 185struct drm_mode_get_encoder {
170 __u32 encoder_id; 186 __u32 encoder_id;
@@ -203,6 +219,7 @@ struct drm_mode_get_encoder {
203#define DRM_MODE_CONNECTOR_TV 13 219#define DRM_MODE_CONNECTOR_TV 13
204#define DRM_MODE_CONNECTOR_eDP 14 220#define DRM_MODE_CONNECTOR_eDP 14
205#define DRM_MODE_CONNECTOR_VIRTUAL 15 221#define DRM_MODE_CONNECTOR_VIRTUAL 15
222#define DRM_MODE_CONNECTOR_DSI 16
206 223
207struct drm_mode_get_connector { 224struct drm_mode_get_connector {
208 225
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 55bb5729bd78..3a4e97bd8607 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -38,10 +38,10 @@
38 * 38 *
39 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch 39 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
40 * event from the gpu l3 cache. Additional information supplied is ROW, 40 * event from the gpu l3 cache. Additional information supplied is ROW,
41 * BANK, SUBBANK of the affected cacheline. Userspace should keep track of 41 * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
42 * these events and if a specific cache-line seems to have a persistent 42 * track of these events and if a specific cache-line seems to have a
43 * error remap it with the l3 remapping tool supplied in intel-gpu-tools. 43 * persistent error remap it with the l3 remapping tool supplied in
44 * The value supplied with the event is always 1. 44 * intel-gpu-tools. The value supplied with the event is always 1.
45 * 45 *
46 * I915_ERROR_UEVENT - Generated upon error detection, currently only via 46 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
47 * hangcheck. The error detection event is a good indicator of when things 47 * hangcheck. The error detection event is a good indicator of when things
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index 73bde4eaf16c..5e1ab552cbed 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -19,6 +19,9 @@
19 19
20#include <drm/drm.h> 20#include <drm/drm.h>
21 21
22#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
23#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
24
22struct drm_tegra_gem_create { 25struct drm_tegra_gem_create {
23 __u64 size; 26 __u64 size;
24 __u32 flags; 27 __u32 flags;
@@ -65,6 +68,12 @@ struct drm_tegra_get_syncpt {
65 __u32 id; 68 __u32 id;
66}; 69};
67 70
71struct drm_tegra_get_syncpt_base {
72 __u64 context;
73 __u32 syncpt;
74 __u32 id;
75};
76
68struct drm_tegra_syncpt { 77struct drm_tegra_syncpt {
69 __u32 id; 78 __u32 id;
70 __u32 incrs; 79 __u32 incrs;
@@ -115,15 +124,16 @@ struct drm_tegra_submit {
115 __u32 reserved[5]; /* future expansion */ 124 __u32 reserved[5]; /* future expansion */
116}; 125};
117 126
118#define DRM_TEGRA_GEM_CREATE 0x00 127#define DRM_TEGRA_GEM_CREATE 0x00
119#define DRM_TEGRA_GEM_MMAP 0x01 128#define DRM_TEGRA_GEM_MMAP 0x01
120#define DRM_TEGRA_SYNCPT_READ 0x02 129#define DRM_TEGRA_SYNCPT_READ 0x02
121#define DRM_TEGRA_SYNCPT_INCR 0x03 130#define DRM_TEGRA_SYNCPT_INCR 0x03
122#define DRM_TEGRA_SYNCPT_WAIT 0x04 131#define DRM_TEGRA_SYNCPT_WAIT 0x04
123#define DRM_TEGRA_OPEN_CHANNEL 0x05 132#define DRM_TEGRA_OPEN_CHANNEL 0x05
124#define DRM_TEGRA_CLOSE_CHANNEL 0x06 133#define DRM_TEGRA_CLOSE_CHANNEL 0x06
125#define DRM_TEGRA_GET_SYNCPT 0x07 134#define DRM_TEGRA_GET_SYNCPT 0x07
126#define DRM_TEGRA_SUBMIT 0x08 135#define DRM_TEGRA_SUBMIT 0x08
136#define DRM_TEGRA_GET_SYNCPT_BASE 0x09
127 137
128#define DRM_IOCTL_TEGRA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_CREATE, struct drm_tegra_gem_create) 138#define DRM_IOCTL_TEGRA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_CREATE, struct drm_tegra_gem_create)
129#define DRM_IOCTL_TEGRA_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_MMAP, struct drm_tegra_gem_mmap) 139#define DRM_IOCTL_TEGRA_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_MMAP, struct drm_tegra_gem_mmap)
@@ -134,5 +144,6 @@ struct drm_tegra_submit {
134#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_open_channel) 144#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_open_channel)
135#define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt) 145#define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt)
136#define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit) 146#define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit)
147#define DRM_IOCTL_TEGRA_GET_SYNCPT_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT_BASE, struct drm_tegra_get_syncpt_base)
137 148
138#endif 149#endif
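
Userspace pairs the new ioctl with the existing channel and sync point queries. A fragment showing the lookup (context and syncpt_id come from the earlier OPEN_CHANNEL and GET_SYNCPT calls; error handling omitted):

struct drm_tegra_get_syncpt_base args = {
	.context = context,
	.syncpt = syncpt_id,
};

if (drmIoctl(fd, DRM_IOCTL_TEGRA_GET_SYNCPT_BASE, &args) == 0)
	printf("sync point %u uses wait base %u\n", args.syncpt, args.id);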