aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/Kconfig4
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/armada/Kconfig24
-rw-r--r--drivers/gpu/drm/armada/Makefile7
-rw-r--r--drivers/gpu/drm/armada/armada_510.c87
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c1098
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h83
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c177
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h113
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c421
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c170
-rw-r--r--drivers/gpu/drm/armada/armada_fb.h24
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c202
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c611
-rw-r--r--drivers/gpu/drm/armada/armada_gem.h52
-rw-r--r--drivers/gpu/drm/armada/armada_hw.h318
-rw-r--r--drivers/gpu/drm/armada/armada_ioctlP.h18
-rw-r--r--drivers/gpu/drm/armada/armada_output.c158
-rw-r--r--drivers/gpu/drm/armada/armada_output.h39
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c477
-rw-r--r--drivers/gpu/drm/armada/armada_slave.c139
-rw-r--r--drivers/gpu/drm/armada/armada_slave.h26
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c11
-rw-r--r--drivers/gpu/drm/drm_crtc.c51
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c78
-rw-r--r--drivers/gpu/drm/drm_debugfs.c6
-rw-r--r--drivers/gpu/drm/drm_drv.c2
-rw-r--r--drivers/gpu/drm/drm_edid.c37
-rw-r--r--drivers/gpu/drm/drm_fops.c9
-rw-r--r--drivers/gpu/drm/drm_irq.c25
-rw-r--r--drivers/gpu/drm/drm_modes.c2
-rw-r--r--drivers/gpu/drm/drm_pci.c4
-rw-r--r--drivers/gpu/drm/drm_stub.c81
-rw-r--r--drivers/gpu/drm/drm_sysfs.c96
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c2
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c90
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c433
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c6
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c30
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c36
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h51
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c59
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c3
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c113
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c34
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h64
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c35
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c509
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c439
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h156
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c42
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c56
-rw-r--r--drivers/gpu/drm/i915/i915_ums.c27
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c10
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h34
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c11
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c124
-rw-r--r--drivers/gpu/drm/i915/intel_display.c144
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c76
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h22
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c9
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c41
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c863
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c182
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c195
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c11
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c59
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c2
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h42
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h46
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h10
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h6
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h6
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h6
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h6
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4.xml.h126
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_crtc.c208
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_format.c16
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.c19
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.h58
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_plane.c30
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c60
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h37
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c160
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h3
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c56
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c4
-rw-r--r--drivers/gpu/drm/nouveau/Makefile48
-rw-r--r--drivers/gpu/drm/nouveau/core/core/event.c119
-rw-r--r--drivers/gpu/drm/nouveau/core/core/option.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/core/printk.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/base.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/ctrl.c144
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv04.c20
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv10.c76
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv20.c40
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv30.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv40.c218
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c195
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c118
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c99
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c68
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h15
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c103
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c194
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/base.c449
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c143
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h26
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c70
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c78
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c173
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c162
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c71
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h91
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.h47
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c130
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h73
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/debug.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/event.h22
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/option.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/printk.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/mpeg.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/perfmon.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/software.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h28
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h10
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h33
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h25
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bus.h20
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h111
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h50
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/pwr.h80
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/therm.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/volt.h60
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/boost.c127
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c123
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dp.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/perf.c140
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/pll.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c88
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/timing.c73
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c112
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/volt.c137
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c145
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h113
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c44
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c60
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c59
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/base.c494
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c183
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c520
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c271
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c404
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c497
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/seq.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/base.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c29
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h55
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c47
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c47
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c53
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c47
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c47
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c46
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c51
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/priv.h53
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h118
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c168
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c344
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c447
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c66
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c567
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c1264
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h18
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c99
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c27
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c89
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c44
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h21
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c41
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c40
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/base.c247
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc151
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc84
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc452
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc199
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc219
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h1165
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h1229
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h1229
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h1229
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc57
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc64
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c121
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c71
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/base.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fan.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/ic.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/volt/base.c198
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c56
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/Makefile1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c320
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.c44
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c97
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c93
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c (renamed from drivers/gpu/drm/nouveau/nouveau_pm.c)560
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.h43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwsq.h115
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c647
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c416
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.h283
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.c162
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_volt.c250
-rw-r--r--drivers/gpu/drm/nouveau/nv04_pm.c146
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c353
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c855
-rw-r--r--drivers/gpu/drm/nouveau/nva3_pm.c624
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_pm.c599
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c51
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c42
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios.h127
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c21
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c26
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c58
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c4
-rw-r--r--drivers/gpu/drm/radeon/cik.c757
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c62
-rw-r--r--drivers/gpu/drm/radeon/cikd.h103
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c66
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c80
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c71
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h70
-rw-r--r--drivers/gpu/drm/radeon/ni.c76
-rw-r--r--drivers/gpu/drm/radeon/ni_dma.c19
-rw-r--r--drivers/gpu/drm/radeon/r100.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c53
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c18
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c102
-rw-r--r--drivers/gpu/drm/radeon/r600d.h28
-rw-r--r--drivers/gpu/drm/radeon/radeon.h33
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c74
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h35
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c116
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c298
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c78
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c83
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c171
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c350
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c73
-rw-r--r--drivers/gpu/drm/radeon/radeon_ioc32.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h17
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c65
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h24
-rw-r--r--drivers/gpu/drm/radeon/radeon_ucode.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c5
-rw-r--r--drivers/gpu/drm/radeon/rs600.c64
-rw-r--r--drivers/gpu/drm/radeon/rs690.c16
-rw-r--r--drivers/gpu/drm/radeon/rv515.c8
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/si.c99
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c22
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c9
-rw-r--r--drivers/gpu/drm/radeon/sid.h47
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c4
-rw-r--r--drivers/gpu/drm/tegra/Kconfig (renamed from drivers/gpu/host1x/drm/Kconfig)12
-rw-r--r--drivers/gpu/drm/tegra/Makefile15
-rw-r--r--drivers/gpu/drm/tegra/bus.c76
-rw-r--r--drivers/gpu/drm/tegra/dc.c (renamed from drivers/gpu/host1x/drm/dc.c)108
-rw-r--r--drivers/gpu/drm/tegra/dc.h (renamed from drivers/gpu/host1x/drm/dc.h)5
-rw-r--r--drivers/gpu/drm/tegra/drm.c (renamed from drivers/gpu/host1x/drm/drm.c)593
-rw-r--r--drivers/gpu/drm/tegra/drm.h (renamed from drivers/gpu/host1x/drm/drm.h)101
-rw-r--r--drivers/gpu/drm/tegra/fb.c (renamed from drivers/gpu/host1x/drm/fb.c)38
-rw-r--r--drivers/gpu/drm/tegra/gem.c (renamed from drivers/gpu/host1x/drm/gem.c)44
-rw-r--r--drivers/gpu/drm/tegra/gem.h (renamed from drivers/gpu/host1x/drm/gem.h)16
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c227
-rw-r--r--drivers/gpu/drm/tegra/gr2d.h28
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c338
-rw-r--r--drivers/gpu/drm/tegra/gr3d.h27
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c (renamed from drivers/gpu/host1x/drm/hdmi.c)257
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h (renamed from drivers/gpu/host1x/drm/hdmi.h)152
-rw-r--r--drivers/gpu/drm/tegra/output.c (renamed from drivers/gpu/host1x/drm/output.c)64
-rw-r--r--drivers/gpu/drm/tegra/rgb.c (renamed from drivers/gpu/host1x/drm/rgb.c)19
-rw-r--r--drivers/gpu/drm/ttm/Makefile6
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c46
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c30
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c92
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c379
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c94
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h98
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c153
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c42
-rw-r--r--drivers/gpu/host1x/Kconfig2
-rw-r--r--drivers/gpu/host1x/Makefile13
-rw-r--r--drivers/gpu/host1x/bus.c550
-rw-r--r--drivers/gpu/host1x/bus.h (renamed from drivers/gpu/host1x/host1x_client.h)24
-rw-r--r--drivers/gpu/host1x/cdma.c2
-rw-r--r--drivers/gpu/host1x/channel.h6
-rw-r--r--drivers/gpu/host1x/dev.c82
-rw-r--r--drivers/gpu/host1x/dev.h11
-rw-r--r--drivers/gpu/host1x/drm/gr2d.c343
-rw-r--r--drivers/gpu/host1x/host1x.h30
-rw-r--r--drivers/gpu/host1x/host1x_bo.h87
-rw-r--r--drivers/gpu/host1x/hw/Makefile6
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c8
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c32
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c16
-rw-r--r--drivers/gpu/host1x/hw/host1x01.c16
-rw-r--r--drivers/gpu/host1x/hw/host1x02.c42
-rw-r--r--drivers/gpu/host1x/hw/host1x02.h26
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x01_uclass.h6
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x02_channel.h121
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x02_sync.h243
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x02_uclass.h175
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c4
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c4
-rw-r--r--drivers/gpu/host1x/job.c73
-rw-r--r--drivers/gpu/host1x/job.h108
-rw-r--r--drivers/gpu/host1x/syncpt.c92
-rw-r--r--drivers/gpu/host1x/syncpt.h46
450 files changed, 34202 insertions, 10551 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index b4e4fc0d6650..f86427591167 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -176,6 +176,8 @@ source "drivers/gpu/drm/mgag200/Kconfig"
176 176
177source "drivers/gpu/drm/cirrus/Kconfig" 177source "drivers/gpu/drm/cirrus/Kconfig"
178 178
179source "drivers/gpu/drm/armada/Kconfig"
180
179source "drivers/gpu/drm/rcar-du/Kconfig" 181source "drivers/gpu/drm/rcar-du/Kconfig"
180 182
181source "drivers/gpu/drm/shmobile/Kconfig" 183source "drivers/gpu/drm/shmobile/Kconfig"
@@ -187,3 +189,5 @@ source "drivers/gpu/drm/tilcdc/Kconfig"
187source "drivers/gpu/drm/qxl/Kconfig" 189source "drivers/gpu/drm/qxl/Kconfig"
188 190
189source "drivers/gpu/drm/msm/Kconfig" 191source "drivers/gpu/drm/msm/Kconfig"
192
193source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 5af240bfd29f..cc08b845f965 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -50,10 +50,12 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/
50obj-$(CONFIG_DRM_GMA500) += gma500/ 50obj-$(CONFIG_DRM_GMA500) += gma500/
51obj-$(CONFIG_DRM_UDL) += udl/ 51obj-$(CONFIG_DRM_UDL) += udl/
52obj-$(CONFIG_DRM_AST) += ast/ 52obj-$(CONFIG_DRM_AST) += ast/
53obj-$(CONFIG_DRM_ARMADA) += armada/
53obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/ 54obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
54obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ 55obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
55obj-$(CONFIG_DRM_OMAP) += omapdrm/ 56obj-$(CONFIG_DRM_OMAP) += omapdrm/
56obj-$(CONFIG_DRM_TILCDC) += tilcdc/ 57obj-$(CONFIG_DRM_TILCDC) += tilcdc/
57obj-$(CONFIG_DRM_QXL) += qxl/ 58obj-$(CONFIG_DRM_QXL) += qxl/
58obj-$(CONFIG_DRM_MSM) += msm/ 59obj-$(CONFIG_DRM_MSM) += msm/
60obj-$(CONFIG_DRM_TEGRA) += tegra/
59obj-y += i2c/ 61obj-y += i2c/
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
new file mode 100644
index 000000000000..40d371521fe1
--- /dev/null
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -0,0 +1,24 @@
1config DRM_ARMADA
2 tristate "DRM support for Marvell Armada SoCs"
3 depends on DRM && HAVE_CLK && ARM
4 select FB_CFB_FILLRECT
5 select FB_CFB_COPYAREA
6 select FB_CFB_IMAGEBLIT
7 select DRM_KMS_HELPER
8 help
9 Support the "LCD" controllers found on the Marvell Armada 510
10 devices. There are two controllers on the device, each controller
11 supports graphics and video overlays.
12
13 This driver provides no built-in acceleration; acceleration is
14 performed by other IP found on the SoC. This driver provides
15 kernel mode setting and buffer management to userspace.
16
17config DRM_ARMADA_TDA1998X
18 bool "Support TDA1998X HDMI output"
19 depends on DRM_ARMADA != n
20 depends on I2C && DRM_I2C_NXP_TDA998X = y
21 default y
22 help
23 Support the TDA1998x HDMI output device found on the Solid-Run
24 CuBox.
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
new file mode 100644
index 000000000000..d6f43e06150a
--- /dev/null
+++ b/drivers/gpu/drm/armada/Makefile
@@ -0,0 +1,7 @@
1armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
2 armada_gem.o armada_output.o armada_overlay.o \
3 armada_slave.o
4armada-y += armada_510.o
5armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
6
7obj-$(CONFIG_DRM_ARMADA) := armada.o
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
new file mode 100644
index 000000000000..59948eff6095
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -0,0 +1,87 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Armada 510 (aka Dove) variant support
9 */
10#include <linux/clk.h>
11#include <linux/io.h>
12#include <drm/drmP.h>
13#include <drm/drm_crtc_helper.h>
14#include "armada_crtc.h"
15#include "armada_drm.h"
16#include "armada_hw.h"
17
18static int armada510_init(struct armada_private *priv, struct device *dev)
19{
20 priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
21
22 if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
23 priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
24
25 return PTR_RET(priv->extclk[0]);
26}
27
28static int armada510_crtc_init(struct armada_crtc *dcrtc)
29{
30 /* Lower the watermark so to eliminate jitter at higher bandwidths */
31 armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
32 return 0;
33}
34
35/*
36 * Armada510 specific SCLK register selection.
37 * This gets called with sclk = NULL to test whether the mode is
38 * supportable, and again with sclk != NULL to set the clocks up for
39 * that. The former can return an error, but the latter is expected
40 * not to.
41 *
42 * We currently are pretty rudimentary here, always selecting
43 * EXT_REF_CLK_1 for LCD0 and erroring LCD1. This needs improvement!
44 */
45static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
46 const struct drm_display_mode *mode, uint32_t *sclk)
47{
48 struct armada_private *priv = dcrtc->crtc.dev->dev_private;
49 struct clk *clk = priv->extclk[0];
50 int ret;
51
52 if (dcrtc->num == 1)
53 return -EINVAL;
54
55 if (IS_ERR(clk))
56 return PTR_ERR(clk);
57
58 if (dcrtc->clk != clk) {
59 ret = clk_prepare_enable(clk);
60 if (ret)
61 return ret;
62 dcrtc->clk = clk;
63 }
64
65 if (sclk) {
66 uint32_t rate, ref, div;
67
68 rate = mode->clock * 1000;
69 ref = clk_round_rate(clk, rate);
70 div = DIV_ROUND_UP(ref, rate);
71 if (div < 1)
72 div = 1;
73
74 clk_set_rate(clk, ref);
75 *sclk = div | SCLK_510_EXTCLK1;
76 }
77
78 return 0;
79}
80
81const struct armada_variant armada510_ops = {
82 .has_spu_adv_reg = true,
83 .spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
84 .init = armada510_init,
85 .crtc_init = armada510_crtc_init,
86 .crtc_compute_clock = armada510_crtc_compute_clock,
87};
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
new file mode 100644
index 000000000000..d8e398275ca8
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -0,0 +1,1098 @@
1/*
2 * Copyright (C) 2012 Russell King
3 * Rewritten from the dovefb driver, and Armada510 manuals.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/clk.h>
10#include <drm/drmP.h>
11#include <drm/drm_crtc_helper.h>
12#include "armada_crtc.h"
13#include "armada_drm.h"
14#include "armada_fb.h"
15#include "armada_gem.h"
16#include "armada_hw.h"
17
/*
 * Deferred page-flip state: register updates to apply at frame IRQ
 * time, the vblank event to send, and the old framebuffer to release.
 */
struct armada_frame_work {
	struct drm_pending_vblank_event *event;
	struct armada_regs regs[4];
	struct drm_framebuffer *old_fb;
};
23
/*
 * Colour-space conversion selection.  The YUV and RGB settings share
 * the numeric values 1 and 2, each pair selecting between the two
 * standards for its colour space (see the CSC_YUV/CSC_RGB properties).
 */
enum csc_mode {
	CSC_AUTO = 0,
	CSC_YUV_CCIR601 = 1,
	CSC_YUV_CCIR709 = 2,
	CSC_RGB_COMPUTER = 1,
	CSC_RGB_STUDIO = 2,
};
31
32/*
33 * A note about interlacing. Let's consider HDMI 1920x1080i.
34 * The timing parameters we have from X are:
35 * Hact HsyA HsyI Htot Vact VsyA VsyI Vtot
36 * 1920 2448 2492 2640 1080 1084 1094 1125
37 * Which get translated to:
38 * Hact HsyA HsyI Htot Vact VsyA VsyI Vtot
39 * 1920 2448 2492 2640 540 542 547 562
40 *
41 * This is how it is defined by CEA-861-D - line and pixel numbers are
42 * referenced to the rising edge of VSYNC and HSYNC. Total clocks per
43 * line: 2640. The odd frame, the first active line is at line 21, and
44 * the even frame, the first active line is 584.
45 *
46 * LN: 560 561 562 563 567 568 569
47 * DE: ~~~|____________________________//__________________________
48 * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
49 * VSYNC: _________________________|~~~~~~//~~~~~~~~~~~~~~~|__________
50 * 22 blanking lines. VSYNC at 1320 (referenced to the HSYNC rising edge).
51 *
52 * LN: 1123 1124 1125 1 5 6 7
53 * DE: ~~~|____________________________//__________________________
54 * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
55 * VSYNC: ____________________|~~~~~~~~~~~//~~~~~~~~~~|_______________
56 * 23 blanking lines
57 *
58 * The Armada LCD Controller line and pixel numbers are, like X timings,
59 * referenced to the top left of the active frame.
60 *
61 * So, translating these to our LCD controller:
62 * Odd frame, 563 total lines, VSYNC at line 543-548, pixel 1128.
63 * Even frame, 562 total lines, VSYNC at line 542-547, pixel 2448.
64 * Note: Vsync front porch remains constant!
65 *
66 * if (odd_frame) {
67 * vtotal = mode->crtc_vtotal + 1;
68 * vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay + 1;
69 * vhorizpos = mode->crtc_hsync_start - mode->crtc_htotal / 2
70 * } else {
71 * vtotal = mode->crtc_vtotal;
72 * vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay;
73 * vhorizpos = mode->crtc_hsync_start;
74 * }
75 * vfrontporch = mode->crtc_vtotal - mode->crtc_vsync_end;
76 *
77 * So, we need to reprogram these registers on each vsync event:
78 * LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
79 *
80 * Note: we do not use the frame done interrupts because these appear
81 * to happen too early, and lead to jitter on the display (presumably
82 * they occur at the end of the last active line, before the vsync back
83 * porch, which we're reprogramming.)
84 */
85
86void
87armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
88{
89 while (regs->offset != ~0) {
90 void __iomem *reg = dcrtc->base + regs->offset;
91 uint32_t val;
92
93 val = regs->mask;
94 if (val != 0)
95 val &= readl_relaxed(reg);
96 writel_relaxed(val | regs->val, reg);
97 ++regs;
98 }
99}
100
101#define dpms_blanked(dpms) ((dpms) != DRM_MODE_DPMS_ON)
102
/*
 * Recompute the dumb interface control register from the current DPMS
 * state and the mode's sync polarity flags, writing it to the hardware
 * only when the value actually changes.
 */
static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
{
	uint32_t dumb_ctrl;

	dumb_ctrl = dcrtc->cfg_dumb_ctrl;

	if (!dpms_blanked(dcrtc->dpms))
		dumb_ctrl |= CFG_DUMB_ENA;

	/*
	 * When the dumb interface isn't in DUMB24_RGB888_0 mode, it might
	 * be using SPI or GPIO.  If we set this to DUMB_BLANK, we will
	 * force LCD_D[23:0] to output blank color, overriding the GPIO or
	 * SPI usage.  So leave it as-is unless in DUMB24_RGB888_0 mode.
	 */
	if (dpms_blanked(dcrtc->dpms) &&
	    (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
		dumb_ctrl &= ~DUMB_MASK;
		dumb_ctrl |= DUMB_BLANK;
	}

	/*
	 * The documentation doesn't indicate what the normal state of
	 * the sync signals are.  Sebastian Hesselbart kindly probed
	 * these signals on his board to determine their state.
	 *
	 * The non-inverted state of the sync signals is active high.
	 * Setting these bits makes the appropriate signal active low.
	 */
	if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC)
		dumb_ctrl |= CFG_INV_CSYNC;
	if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC)
		dumb_ctrl |= CFG_INV_HSYNC;
	if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC)
		dumb_ctrl |= CFG_INV_VSYNC;

	if (dcrtc->dumb_ctrl != dumb_ctrl) {
		dcrtc->dumb_ctrl = dumb_ctrl;
		writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL);
	}
}
144
/*
 * Queue the graphics start addresses and pitch needed to scan out @fb
 * at panning offset (@x, @y).  For interlaced scanout the even field
 * starts one line further down and the pitch is doubled so each field
 * skips the other field's lines.  Returns the number of register
 * updates queued into @regs (the list is not terminated here).
 */
static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
	int x, int y, struct armada_regs *regs, bool interlaced)
{
	struct armada_gem_object *obj = drm_fb_obj(fb);
	unsigned pitch = fb->pitches[0];
	unsigned offset = y * pitch + x * fb->bits_per_pixel / 8;
	uint32_t addr_odd, addr_even;
	unsigned i = 0;

	DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
		pitch, x, y, fb->bits_per_pixel);

	addr_odd = addr_even = obj->dev_addr + offset;

	if (interlaced) {
		addr_even += pitch;
		pitch *= 2;
	}

	/* write offset, base, and pitch */
	armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0);
	armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1);
	armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);

	return i;
}
171
/*
 * Try to install @work as this CRTC's pending frame work, taking a
 * vblank reference which is held until the work completes.  Returns
 * -EBUSY if other frame work is already pending, in which case the
 * vblank reference is dropped again and @work is left untouched.
 */
static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
	struct armada_frame_work *work)
{
	struct drm_device *dev = dcrtc->crtc.dev;
	unsigned long flags;
	int ret;

	ret = drm_vblank_get(dev, dcrtc->num);
	if (ret) {
		DRM_ERROR("failed to acquire vblank counter\n");
		return ret;
	}

	/* dev->event_lock serialises against the IRQ-side completion */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (!dcrtc->frame_work)
		dcrtc->frame_work = work;
	else
		ret = -EBUSY;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (ret)
		drm_vblank_put(dev, dcrtc->num);

	return ret;
}
197
/*
 * Complete the pending frame work: apply its register updates, send
 * the page-flip event (if any), drop the vblank reference taken when
 * it was queued, and hand the old framebuffer to process context for
 * release.  Caller must hold dev->event_lock.
 */
static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc)
{
	struct drm_device *dev = dcrtc->crtc.dev;
	struct armada_frame_work *work = dcrtc->frame_work;

	dcrtc->frame_work = NULL;

	armada_drm_crtc_update_regs(dcrtc, work->regs);

	if (work->event)
		drm_send_vblank_event(dev, dcrtc->num, work->event);

	drm_vblank_put(dev, dcrtc->num);

	/* Finally, queue the process-half of the cleanup. */
	__armada_drm_queue_unref_work(dcrtc->crtc.dev, work->old_fb);
	kfree(work);
}
216
/*
 * Release our reference on @fb once the hardware has stopped scanning
 * it out.  When @force is set (display disabled) the reference is
 * dropped immediately; otherwise an empty frame work is queued so the
 * drop happens after the next frame interrupt.
 */
static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
	struct drm_framebuffer *fb, bool force)
{
	struct armada_frame_work *work;

	if (!fb)
		return;

	if (force) {
		/* Display is disabled, so just drop the old fb */
		drm_framebuffer_unreference(fb);
		return;
	}

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work) {
		int i = 0;
		work->event = NULL;
		work->old_fb = fb;
		/* No register updates - only the deferred fb release */
		armada_reg_queue_end(work->regs, i);

		if (armada_drm_crtc_queue_frame_work(dcrtc, work) == 0)
			return;

		kfree(work);
	}

	/*
	 * Oops - just drop the reference immediately and hope for
	 * the best.  The worst that will happen is the buffer gets
	 * reused before it has finished being displayed.
	 */
	drm_framebuffer_unreference(fb);
}
251
/*
 * Shut down vblank processing for this CRTC while it is blanked,
 * completing any pending frame work since no further frame interrupts
 * will arrive to do so.
 */
static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
{
	struct drm_device *dev = dcrtc->crtc.dev;

	/*
	 * Tell the DRM core that vblank IRQs aren't going to happen for
	 * a while.  This cleans up any pending vblank events for us.
	 */
	drm_vblank_off(dev, dcrtc->num);

	/* Handle any pending flip event. */
	spin_lock_irq(&dev->event_lock);
	if (dcrtc->frame_work)
		armada_drm_crtc_complete_frame_work(dcrtc);
	spin_unlock_irq(&dev->event_lock);
}
268
/* Gamma adjustment is not implemented for this hardware; no-op stub. */
void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
	int idx)
{
}
273
/* Gamma readback counterpart of the stub above; also a no-op. */
void armada_drm_crtc_gamma_get(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
	int idx)
{
}
278
/* The mode_config.mutex will be held for this call */
static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
{
	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);

	/* Only act on actual state changes */
	if (dcrtc->dpms != dpms) {
		dcrtc->dpms = dpms;
		armada_drm_crtc_update(dcrtc);
		/* Blanking stops frame IRQs, so flush vblank state now */
		if (dpms_blanked(dpms))
			armada_drm_vblank_off(dcrtc);
	}
}
291
/*
 * Prepare for a mode set.  Turn off overlay to ensure that we don't end
 * up with the overlay size being bigger than the active screen size.
 * We rely upon X refreshing this state after the mode set has completed.
 *
 * The mode_config.mutex will be held for this call
 */
static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
{
	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
	struct drm_plane *plane;

	/*
	 * If we have an overlay plane associated with this CRTC, disable
	 * it before the modeset to avoid its coordinates being outside
	 * the new mode parameters.  DRM doesn't provide help with this.
	 */
	plane = dcrtc->plane;
	if (plane) {
		struct drm_framebuffer *fb = plane->fb;

		/*
		 * NOTE(review): assumes plane->fb is non-NULL whenever a
		 * plane is attached - confirm against the overlay code.
		 */
		plane->funcs->disable_plane(plane);
		plane->fb = NULL;
		plane->crtc = NULL;
		drm_framebuffer_unreference(fb);
	}
}
319
320/* The mode_config.mutex will be held for this call */
321static void armada_drm_crtc_commit(struct drm_crtc *crtc)
322{
323 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
324
325 if (dcrtc->dpms != DRM_MODE_DPMS_ON) {
326 dcrtc->dpms = DRM_MODE_DPMS_ON;
327 armada_drm_crtc_update(dcrtc);
328 }
329}
330
331/* The mode_config.mutex will be held for this call */
332static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
333 const struct drm_display_mode *mode, struct drm_display_mode *adj)
334{
335 struct armada_private *priv = crtc->dev->dev_private;
336 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
337 int ret;
338
339 /* We can't do interlaced modes if we don't have the SPU_ADV_REG */
340 if (!priv->variant->has_spu_adv_reg &&
341 adj->flags & DRM_MODE_FLAG_INTERLACE)
342 return false;
343
344 /* Check whether the display mode is possible */
345 ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
346 if (ret)
347 return false;
348
349 return true;
350}
351
/*
 * Per-CRTC interrupt handling: report FIFO underflows, forward vblank
 * to the DRM core, run queued vblank callbacks, reprogram the field
 * timing registers for interlace, apply deferred cursor updates, and
 * complete pending page-flip work on the frame interrupt.
 */
void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
{
	struct armada_vbl_event *e, *n;
	void __iomem *base = dcrtc->base;

	if (stat & DMA_FF_UNDERFLOW)
		DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
	if (stat & GRA_FF_UNDERFLOW)
		DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);

	if (stat & VSYNC_IRQ)
		drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);

	spin_lock(&dcrtc->irq_lock);

	/* Run one-shot vblank callbacks, dropping their vblank refs */
	list_for_each_entry_safe(e, n, &dcrtc->vbl_list, node) {
		list_del_init(&e->node);
		drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
		e->fn(dcrtc, e->data);
	}

	/*
	 * For interlace, reprogram the per-field porch/total/vsync-offset
	 * registers for the field about to be scanned out (see the long
	 * comment at the top of this file).
	 */
	if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
		int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
		uint32_t val;

		writel_relaxed(dcrtc->v[i].spu_v_porch, base + LCD_SPU_V_PORCH);
		writel_relaxed(dcrtc->v[i].spu_v_h_total,
			       base + LCD_SPUT_V_H_TOTAL);

		val = readl_relaxed(base + LCD_SPU_ADV_REG);
		val &= ~(ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF | ADV_VSYNCOFFEN);
		val |= dcrtc->v[i].spu_adv_reg;
		writel_relaxed(val, base + LCD_SPU_ADV_REG);
	}

	/* Apply a deferred cursor position/size/enable update */
	if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) {
		writel_relaxed(dcrtc->cursor_hw_pos,
			       base + LCD_SPU_HWC_OVSA_HPXL_VLN);
		writel_relaxed(dcrtc->cursor_hw_sz,
			       base + LCD_SPU_HWC_HPXL_VLN);
		armada_updatel(CFG_HWC_ENA,
			       CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA,
			       base + LCD_SPU_DMA_CTRL0);
		dcrtc->cursor_update = false;
		armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
	}

	spin_unlock(&dcrtc->irq_lock);

	if (stat & GRA_FRAME_IRQ) {
		struct drm_device *dev = dcrtc->crtc.dev;

		spin_lock(&dev->event_lock);
		if (dcrtc->frame_work)
			armada_drm_crtc_complete_frame_work(dcrtc);
		spin_unlock(&dev->event_lock);

		/* Wake anyone waiting for the flip queue to drain */
		wake_up(&dcrtc->frame_wait);
	}
}
412
413/* These are locked by dev->vbl_lock */
414void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
415{
416 if (dcrtc->irq_ena & mask) {
417 dcrtc->irq_ena &= ~mask;
418 writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
419 }
420}
421
/* Enable the interrupt sources in @mask (locked by dev->vbl_lock). */
void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
{
	if ((dcrtc->irq_ena & mask) != mask) {
		dcrtc->irq_ena |= mask;
		writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
		/*
		 * NOTE(review): a stale status bit is discarded by writing
		 * 0 to the ISR - presumably the write clears the status;
		 * confirm against the controller documentation.
		 */
		if (readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR) & mask)
			writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
	}
}
431
/*
 * Compute the CSC bits for LCD_SPU_IOPAD_CONTROL from the CRTC's YUV
 * and RGB conversion properties, resolving CSC_AUTO from the current
 * display mode.
 */
static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
{
	struct drm_display_mode *adj = &dcrtc->crtc.mode;
	uint32_t val = 0;

	if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709)
		val |= CFG_CSC_YUV_CCIR709;
	if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO)
		val |= CFG_CSC_RGB_STUDIO;

	/*
	 * In auto mode, set the colorimetry, based upon the HDMI spec.
	 * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use
	 * ITU601.  It may be more appropriate to set this depending on
	 * the source - but what if the graphic frame is YUV and the
	 * video frame is RGB?
	 */
	if ((adj->hdisplay == 1280 && adj->vdisplay == 720 &&
	     !(adj->flags & DRM_MODE_FLAG_INTERLACE)) ||
	    (adj->hdisplay == 1920 && adj->vdisplay == 1080)) {
		if (dcrtc->csc_yuv_mode == CSC_AUTO)
			val |= CFG_CSC_YUV_CCIR709;
	}

	/*
	 * We assume we're connected to a TV-like device, so the YUV->RGB
	 * conversion should produce a limited range.  We should set this
	 * depending on the connectors attached to this CRTC, and what
	 * kind of device they report being connected.
	 */
	if (dcrtc->csc_rgb_mode == CSC_AUTO)
		val |= CFG_CSC_RGB_STUDIO;

	return val;
}
467
/* The mode_config.mutex will be held for this call */
static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
	struct drm_display_mode *mode, struct drm_display_mode *adj,
	int x, int y, struct drm_framebuffer *old_fb)
{
	struct armada_private *priv = crtc->dev->dev_private;
	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
	struct armada_regs regs[17];
	uint32_t lm, rm, tm, bm, val, sclk;
	unsigned long flags;
	unsigned i;
	bool interlaced;

	/* Hold the new framebuffer while it is being scanned out */
	drm_framebuffer_reference(crtc->fb);

	interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);

	i = armada_drm_crtc_calc_fb(dcrtc->crtc.fb, x, y, regs, interlaced);

	/* Horizontal right/left and vertical bottom/top margins */
	rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
	lm = adj->crtc_htotal - adj->crtc_hsync_end;
	bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
	tm = adj->crtc_vtotal - adj->crtc_vsync_end;

	DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n",
		adj->crtc_hdisplay,
		adj->crtc_hsync_start,
		adj->crtc_hsync_end,
		adj->crtc_htotal, lm, rm);
	DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n",
		adj->crtc_vdisplay,
		adj->crtc_vsync_start,
		adj->crtc_vsync_end,
		adj->crtc_vtotal, tm, bm);

	/* Wait for pending flips to complete */
	wait_event(dcrtc->frame_wait, !dcrtc->frame_work);

	drm_vblank_pre_modeset(crtc->dev, dcrtc->num);

	crtc->mode = *adj;

	/* Disable the display output while the mode is reprogrammed */
	val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
	if (val != dcrtc->dumb_ctrl) {
		dcrtc->dumb_ctrl = val;
		writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
	}

	/* Now compute the divider for real */
	priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);

	/* Ensure graphic fifo is enabled */
	armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
	armada_reg_queue_set(regs, i, sclk, LCD_CFG_SCLK_DIV);

	/* Keep the vblank reference count in step with interlace use */
	if (interlaced ^ dcrtc->interlaced) {
		if (adj->flags & DRM_MODE_FLAG_INTERLACE)
			drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
		else
			drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
		dcrtc->interlaced = interlaced;
	}

	spin_lock_irqsave(&dcrtc->irq_lock, flags);

	/* Even interlaced/progressive frame */
	dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
				    adj->crtc_htotal;
	dcrtc->v[1].spu_v_porch = tm << 16 | bm;
	val = adj->crtc_hsync_start;
	dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
		priv->variant->spu_adv_reg;

	if (interlaced) {
		/* Odd interlaced frame */
		dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
						(1 << 16);
		dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
		val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
		dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
			priv->variant->spu_adv_reg;
	} else {
		dcrtc->v[0] = dcrtc->v[1];
	}

	val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;

	armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
	armada_reg_queue_set(regs, i, val, LCD_SPU_GRA_HPXL_VLN);
	armada_reg_queue_set(regs, i, val, LCD_SPU_GZM_HPXL_VLN);
	armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
	armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
	armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
			   LCD_SPUT_V_H_TOTAL);

	if (priv->variant->has_spu_adv_reg) {
		armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
				     ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
				     ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
	}

	/* Graphics DMA format/mode bits from the framebuffer */
	val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
	val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt);
	val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.fb)->mod);

	if (drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt > CFG_420)
		val |= CFG_PALETTE_ENA;

	if (interlaced)
		val |= CFG_GRA_FTOGGLE;

	armada_reg_queue_mod(regs, i, val, CFG_GRAFORMAT |
			     CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
					 CFG_SWAPYU | CFG_YUV2RGB) |
			     CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
			     LCD_SPU_DMA_CTRL0);

	val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
	armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);

	val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc);
	armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL);
	armada_reg_queue_end(regs, i);

	armada_drm_crtc_update_regs(dcrtc, regs);
	spin_unlock_irqrestore(&dcrtc->irq_lock, flags);

	/* Re-enable the display output for the new mode */
	armada_drm_crtc_update(dcrtc);

	drm_vblank_post_modeset(crtc->dev, dcrtc->num);
	armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));

	return 0;
}
602
/* The mode_config.mutex will be held for this call */
static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
	struct drm_framebuffer *old_fb)
{
	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
	struct armada_regs regs[4];
	unsigned i;

	/*
	 * crtc->x/y have already been updated by the DRM core to the
	 * new panning offsets, so @x/@y are not used directly here.
	 */
	i = armada_drm_crtc_calc_fb(crtc->fb, crtc->x, crtc->y, regs,
				    dcrtc->interlaced);
	armada_reg_queue_end(regs, i);

	/* Wait for pending flips to complete */
	wait_event(dcrtc->frame_wait, !dcrtc->frame_work);

	/* Take a reference to the new fb as we're using it */
	drm_framebuffer_reference(crtc->fb);

	/* Update the base in the CRTC */
	armada_drm_crtc_update_regs(dcrtc, regs);

	/* Drop our previously held reference */
	armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));

	return 0;
}
629
/* No LUT support is implemented; no-op required by the CRTC helpers. */
static void armada_drm_crtc_load_lut(struct drm_crtc *crtc)
{
}
633
/* The mode_config.mutex will be held for this call */
static void armada_drm_crtc_disable(struct drm_crtc *crtc)
{
	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);

	/* Blank the display and immediately release the framebuffer */
	armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	armada_drm_crtc_finish_fb(dcrtc, crtc->fb, true);

	/* Power down most RAMs and FIFOs */
	writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
		       CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
		       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
}
647
/* CRTC helper callbacks invoked by the DRM mode-setting helpers. */
static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
	.dpms = armada_drm_crtc_dpms,
	.prepare = armada_drm_crtc_prepare,
	.commit = armada_drm_crtc_commit,
	.mode_fixup = armada_drm_crtc_mode_fixup,
	.mode_set = armada_drm_crtc_mode_set,
	.mode_set_base = armada_drm_crtc_mode_set_base,
	.load_lut = armada_drm_crtc_load_lut,
	.disable = armada_drm_crtc_disable,
};
658
/*
 * Copy an ARGB cursor image into the hardware cursor SRAM, swapping
 * the red and blue channels on the way in.
 */
static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
	unsigned stride, unsigned width, unsigned height)
{
	uint32_t addr;
	unsigned y;

	addr = SRAM_HWC32_RAM1;
	for (y = 0; y < height; y++) {
		uint32_t *p = &pix[y * stride];
		unsigned x;

		for (x = 0; x < width; x++, p++) {
			uint32_t val = *p;

			/* Swap R and B: A..G.. stay, B<->R move 16 bits */
			val = (val & 0xff00ff00) |
			      (val & 0x000000ff) << 16 |
			      (val & 0x00ff0000) >> 16;

			writel_relaxed(val,
				       base + LCD_SPU_SRAM_WRDAT);
			writel_relaxed(addr | SRAM_WRITE,
				       base + LCD_SPU_SRAM_CTRL);
			/*
			 * NOTE(review): the SRAM address sequence below
			 * skips across banks - apparently the cursor SRAM
			 * is not contiguous; confirm against the manual.
			 */
			addr += 1;
			if ((addr & 0x00ff) == 0)
				addr += 0xf00;
			if ((addr & 0x30ff) == 0)
				addr = SRAM_HWC32_RAM2;
		}
	}
}
689
690static void armada_drm_crtc_cursor_tran(void __iomem *base)
691{
692 unsigned addr;
693
694 for (addr = 0; addr < 256; addr++) {
695 /* write the default value */
696 writel_relaxed(0x55555555, base + LCD_SPU_SRAM_WRDAT);
697 writel_relaxed(addr | SRAM_WRITE | SRAM_HWC32_TRAN,
698 base + LCD_SPU_SRAM_CTRL);
699 }
700}
701
/*
 * Recompute the cursor's visible region after a move, resize or image
 * change.  Loads the image into SRAM when @reload is set (or when the
 * SRAM was powered down or the size changed), and defers the actual
 * position/size/enable writes to the next DUMB_FRAMEDONE interrupt.
 */
static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
{
	uint32_t xoff, xscr, w = dcrtc->cursor_w, s;
	uint32_t yoff, yscr, h = dcrtc->cursor_h;
	uint32_t para1;

	/*
	 * Calculate the visible width and height of the cursor,
	 * screen position, and the position in the cursor bitmap.
	 */
	if (dcrtc->cursor_x < 0) {
		xoff = -dcrtc->cursor_x;
		xscr = 0;
		w -= min(xoff, w);
	} else if (dcrtc->cursor_x + w > dcrtc->crtc.mode.hdisplay) {
		xoff = 0;
		xscr = dcrtc->cursor_x;
		w = max_t(int, dcrtc->crtc.mode.hdisplay - dcrtc->cursor_x, 0);
	} else {
		xoff = 0;
		xscr = dcrtc->cursor_x;
	}

	if (dcrtc->cursor_y < 0) {
		yoff = -dcrtc->cursor_y;
		yscr = 0;
		h -= min(yoff, h);
	} else if (dcrtc->cursor_y + h > dcrtc->crtc.mode.vdisplay) {
		yoff = 0;
		yscr = dcrtc->cursor_y;
		h = max_t(int, dcrtc->crtc.mode.vdisplay - dcrtc->cursor_y, 0);
	} else {
		yoff = 0;
		yscr = dcrtc->cursor_y;
	}

	/* On interlaced modes, the vertical cursor size must be halved */
	s = dcrtc->cursor_w;
	if (dcrtc->interlaced) {
		s *= 2;
		yscr /= 2;
		h /= 2;
	}

	/* Nothing visible: disable the hardware cursor entirely */
	if (!dcrtc->cursor_obj || !h || !w) {
		spin_lock_irq(&dcrtc->irq_lock);
		armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
		dcrtc->cursor_update = false;
		armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
		spin_unlock_irq(&dcrtc->irq_lock);
		return 0;
	}

	/* Power up the 256x32 cursor SRAM, remembering its prior state */
	para1 = readl_relaxed(dcrtc->base + LCD_SPU_SRAM_PARA1);
	armada_updatel(CFG_CSB_256x32, CFG_CSB_256x32 | CFG_PDWN256x32,
		       dcrtc->base + LCD_SPU_SRAM_PARA1);

	/*
	 * Initialize the transparency if the SRAM was powered down.
	 * We must also reload the cursor data as well.
	 */
	if (!(para1 & CFG_CSB_256x32)) {
		armada_drm_crtc_cursor_tran(dcrtc->base);
		reload = true;
	}

	/* A size change requires disabling the cursor and a reload */
	if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
		spin_lock_irq(&dcrtc->irq_lock);
		armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
		dcrtc->cursor_update = false;
		armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
		spin_unlock_irq(&dcrtc->irq_lock);
		reload = true;
	}
	if (reload) {
		struct armada_gem_object *obj = dcrtc->cursor_obj;
		uint32_t *pix;
		/* Set the top-left corner of the cursor image */
		pix = obj->addr;
		pix += yoff * s + xoff;
		armada_load_cursor_argb(dcrtc->base, pix, s, w, h);
	}

	/* Reload the cursor position, size and enable in the IRQ handler */
	spin_lock_irq(&dcrtc->irq_lock);
	dcrtc->cursor_hw_pos = yscr << 16 | xscr;
	dcrtc->cursor_hw_sz = h << 16 | w;
	dcrtc->cursor_update = true;
	armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
	spin_unlock_irq(&dcrtc->irq_lock);

	return 0;
}
795
/* GEM object update callback: reload the cursor image into SRAM. */
static void cursor_update(void *data)
{
	armada_drm_crtc_cursor_update(data, true);
}
800
/*
 * Set (or clear, when @handle is 0) the hardware cursor image from a
 * kernel-mapped GEM object.  The object's update callback keeps the
 * SRAM copy in sync when userspace rewrites the buffer.
 */
static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
	struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h)
{
	struct drm_device *dev = crtc->dev;
	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
	struct armada_private *priv = crtc->dev->dev_private;
	struct armada_gem_object *obj = NULL;
	int ret;

	/* If no cursor support, replicate drm's return value */
	if (!priv->variant->has_spu_adv_reg)
		return -ENXIO;

	if (handle && w > 0 && h > 0) {
		/* maximum size is 64x32 or 32x64 */
		if (w > 64 || h > 64 || (w > 32 && h > 32))
			return -ENOMEM;

		obj = armada_gem_object_lookup(dev, file, handle);
		if (!obj)
			return -ENOENT;

		/* Must be a kernel-mapped object */
		if (!obj->addr) {
			drm_gem_object_unreference_unlocked(&obj->obj);
			return -EINVAL;
		}

		/* 4 bytes per ARGB pixel */
		if (obj->obj.size < w * h * 4) {
			DRM_ERROR("buffer is too small\n");
			drm_gem_object_unreference_unlocked(&obj->obj);
			return -ENOMEM;
		}
	}

	/* struct_mutex protects the cursor object and its callbacks */
	mutex_lock(&dev->struct_mutex);
	if (dcrtc->cursor_obj) {
		dcrtc->cursor_obj->update = NULL;
		dcrtc->cursor_obj->update_data = NULL;
		drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
	}
	dcrtc->cursor_obj = obj;
	dcrtc->cursor_w = w;
	dcrtc->cursor_h = h;
	ret = armada_drm_crtc_cursor_update(dcrtc, true);
	if (obj) {
		obj->update_data = dcrtc;
		obj->update = cursor_update;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
854
/* Move the hardware cursor; the image in SRAM is not reloaded. */
static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
	struct armada_private *priv = crtc->dev->dev_private;
	int ret;

	/* If no cursor support, replicate drm's return value */
	if (!priv->variant->has_spu_adv_reg)
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	dcrtc->cursor_x = x;
	dcrtc->cursor_y = y;
	ret = armada_drm_crtc_cursor_update(dcrtc, false);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
874
/* Tear down the CRTC: drop the cursor object, stop the clock, free. */
static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
{
	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
	struct armada_private *priv = crtc->dev->dev_private;

	if (dcrtc->cursor_obj)
		drm_gem_object_unreference(&dcrtc->cursor_obj->obj);

	priv->dcrtc[dcrtc->num] = NULL;
	drm_crtc_cleanup(&dcrtc->crtc);

	/* dcrtc->clk holds an error pointer until a clock is enabled */
	if (!IS_ERR(dcrtc->clk))
		clk_disable_unprepare(dcrtc->clk);

	kfree(dcrtc);
}
891
/*
 * The mode_config lock is held here, to prevent races between this
 * and a mode_set.
 */
static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
	struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
{
	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
	struct armada_frame_work *work;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;
	unsigned i;
	int ret;

	/* We don't support changing the pixel format */
	if (fb->pixel_format != crtc->fb->pixel_format)
		return -EINVAL;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->event = event;
	work->old_fb = dcrtc->crtc.fb;

	/* Queue the new scanout addresses; applied at the frame IRQ */
	i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
				    dcrtc->interlaced);
	armada_reg_queue_end(work->regs, i);

	/*
	 * Hold the old framebuffer for the work - DRM appears to drop our
	 * reference to the old framebuffer in drm_mode_page_flip_ioctl().
	 */
	drm_framebuffer_reference(work->old_fb);

	ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
	if (ret) {
		/*
		 * Undo our reference above; DRM does not drop the reference
		 * to this object on error, so that's okay.
		 */
		drm_framebuffer_unreference(work->old_fb);
		kfree(work);
		return ret;
	}

	/*
	 * Don't take a reference on the new framebuffer;
	 * drm_mode_page_flip_ioctl() has already grabbed a reference and
	 * will _not_ drop that reference on successful return from this
	 * function.  Simply mark this new framebuffer as the current one.
	 */
	dcrtc->crtc.fb = fb;

	/*
	 * Finally, if the display is blanked, we won't receive an
	 * interrupt, so complete it now.
	 */
	if (dpms_blanked(dcrtc->dpms)) {
		spin_lock_irqsave(&dev->event_lock, flags);
		if (dcrtc->frame_work)
			armada_drm_crtc_complete_frame_work(dcrtc);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}
959
960static int
961armada_drm_crtc_set_property(struct drm_crtc *crtc,
962 struct drm_property *property, uint64_t val)
963{
964 struct armada_private *priv = crtc->dev->dev_private;
965 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
966 bool update_csc = false;
967
968 if (property == priv->csc_yuv_prop) {
969 dcrtc->csc_yuv_mode = val;
970 update_csc = true;
971 } else if (property == priv->csc_rgb_prop) {
972 dcrtc->csc_rgb_mode = val;
973 update_csc = true;
974 }
975
976 if (update_csc) {
977 uint32_t val;
978
979 val = dcrtc->spu_iopad_ctrl |
980 armada_drm_crtc_calculate_csc(dcrtc);
981 writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL);
982 }
983
984 return 0;
985}
986
/* Core CRTC operations exposed to the DRM core and userspace ioctls. */
static struct drm_crtc_funcs armada_crtc_funcs = {
	.cursor_set = armada_drm_crtc_cursor_set,
	.cursor_move = armada_drm_crtc_cursor_move,
	.destroy = armada_drm_crtc_destroy,
	.set_config = drm_crtc_helper_set_config,
	.page_flip = armada_drm_crtc_page_flip,
	.set_property = armada_drm_crtc_set_property,
};
995
/* Userspace-visible names for the CSC_YUV property values. */
static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
	{ CSC_AUTO,        "Auto" },
	{ CSC_YUV_CCIR601, "CCIR601" },
	{ CSC_YUV_CCIR709, "CCIR709" },
};
1001
/* Userspace-visible names for the CSC_RGB property values. */
static struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
	{ CSC_AUTO,         "Auto" },
	{ CSC_RGB_COMPUTER, "Computer system" },
	{ CSC_RGB_STUDIO,   "Studio" },
};
1007
/*
 * Create the device-wide CSC properties on first CRTC creation;
 * subsequent calls are no-ops.
 *
 * NOTE(review): if only the first creation succeeds, the second call
 * still returns 0 with csc_rgb_prop NULL - presumably harmless since
 * -ENOMEM was returned the first time; confirm callers abort on that.
 */
static int armada_drm_crtc_create_properties(struct drm_device *dev)
{
	struct armada_private *priv = dev->dev_private;

	if (priv->csc_yuv_prop)
		return 0;

	priv->csc_yuv_prop = drm_property_create_enum(dev, 0,
		"CSC_YUV", armada_drm_csc_yuv_enum_list,
		ARRAY_SIZE(armada_drm_csc_yuv_enum_list));
	priv->csc_rgb_prop = drm_property_create_enum(dev, 0,
		"CSC_RGB", armada_drm_csc_rgb_enum_list,
		ARRAY_SIZE(armada_drm_csc_rgb_enum_list));

	if (!priv->csc_yuv_prop || !priv->csc_rgb_prop)
		return -ENOMEM;

	return 0;
}
1027
/*
 * Create and register CRTC @num: map its registers, initialise default
 * state and hardware registers, run the variant hook, register with
 * the DRM core, attach the CSC properties, and create the matching
 * overlay plane.  Returns 0 or a negative errno.
 */
int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
	struct resource *res)
{
	struct armada_private *priv = dev->dev_private;
	struct armada_crtc *dcrtc;
	void __iomem *base;
	int ret;

	ret = armada_drm_crtc_create_properties(dev);
	if (ret)
		return ret;

	base = devm_request_and_ioremap(dev->dev, res);
	if (!base) {
		DRM_ERROR("failed to ioremap register\n");
		return -ENOMEM;
	}

	dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
	if (!dcrtc) {
		DRM_ERROR("failed to allocate Armada crtc\n");
		return -ENOMEM;
	}

	dcrtc->base = base;
	dcrtc->num = num;
	/* No clock enabled yet; crtc_compute_clock() fills this in */
	dcrtc->clk = ERR_PTR(-EINVAL);
	dcrtc->csc_yuv_mode = CSC_AUTO;
	dcrtc->csc_rgb_mode = CSC_AUTO;
	dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
	dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
	spin_lock_init(&dcrtc->irq_lock);
	dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR;
	INIT_LIST_HEAD(&dcrtc->vbl_list);
	init_waitqueue_head(&dcrtc->frame_wait);

	/* Initialize some registers which we don't otherwise set */
	writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV);
	writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_BLANKCOLOR);
	writel_relaxed(dcrtc->spu_iopad_ctrl,
		       dcrtc->base + LCD_SPU_IOPAD_CONTROL);
	writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_SRAM_PARA0);
	writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
		       CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
		       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
	writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
	writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);

	if (priv->variant->crtc_init) {
		ret = priv->variant->crtc_init(dcrtc);
		if (ret) {
			kfree(dcrtc);
			return ret;
		}
	}

	/* Ensure AXI pipeline is enabled */
	armada_updatel(CFG_ARBFAST_ENA, 0, dcrtc->base + LCD_SPU_DMA_CTRL0);

	priv->dcrtc[dcrtc->num] = dcrtc;

	drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
	drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);

	drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
				   dcrtc->csc_yuv_mode);
	drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
				   dcrtc->csc_rgb_mode);

	return armada_overlay_plane_create(dev, 1 << dcrtc->num);
}
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
new file mode 100644
index 000000000000..9c10a07e7492
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -0,0 +1,83 @@
/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef ARMADA_CRTC_H
#define ARMADA_CRTC_H

struct armada_gem_object;

/*
 * One queued register update: write "val" to register "offset".
 * "mask" holds the register bits to be *preserved* (it is stored as
 * the complement of the modified bits - see armada_reg_queue_mod).
 */
struct armada_regs {
	uint32_t offset;
	uint32_t mask;
	uint32_t val;
};

/*
 * Append a masked register update to an armada_regs array.
 *   _r: array  _i: index (post-incremented)  _v: value
 *   _m: bits to modify  _o: register offset
 */
#define armada_reg_queue_mod(_r, _i, _v, _m, _o)	\
	do {					\
		struct armada_regs *__reg = _r;	\
		__reg[_i].offset = _o;		\
		__reg[_i].mask = ~(_m);		\
		__reg[_i].val = _v;		\
		_i++;				\
	} while (0)

/* Queue a full 32-bit register write (every bit modified). */
#define armada_reg_queue_set(_r, _i, _v, _o) \
	armada_reg_queue_mod(_r, _i, _v, ~0, _o)

/* Terminate the queue: an offset of ~0 marks the end of the array. */
#define armada_reg_queue_end(_r, _i)		\
	armada_reg_queue_mod(_r, _i, 0, 0, ~0)

struct armada_frame_work;

/* Per-LCD-controller (CRTC) state. */
struct armada_crtc {
	struct drm_crtc crtc;
	unsigned num;			/* controller index */
	void __iomem *base;		/* LCD register base */
	struct clk *clk;
	/* Two sets of vertical timing registers; presumably one per
	 * field for interlaced modes - see "interlaced" below. */
	struct {
		uint32_t spu_v_h_total;
		uint32_t spu_v_porch;
		uint32_t spu_adv_reg;
	} v[2];
	bool interlaced;
	bool cursor_update;
	uint8_t csc_yuv_mode;		/* colour-space conversion modes */
	uint8_t csc_rgb_mode;

	struct drm_plane *plane;

	/* Hardware cursor state */
	struct armada_gem_object *cursor_obj;
	int cursor_x;
	int cursor_y;
	uint32_t cursor_hw_pos;
	uint32_t cursor_hw_sz;
	uint32_t cursor_w;
	uint32_t cursor_h;

	int dpms;
	uint32_t cfg_dumb_ctrl;
	uint32_t dumb_ctrl;
	uint32_t spu_iopad_ctrl;

	/* Pending frame (flip) work, completed from the vblank irq */
	wait_queue_head_t frame_wait;
	struct armada_frame_work *frame_work;

	/* irq_lock protects irq_ena and vbl_list */
	spinlock_t irq_lock;
	uint32_t irq_ena;		/* currently enabled irq sources */
	struct list_head vbl_list;	/* armada_vbl_event callbacks */
};
#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)

int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
void armada_drm_crtc_irq(struct armada_crtc *, u32);
void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);

#endif
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
new file mode 100644
index 000000000000..471e45627f1e
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -0,0 +1,177 @@
1/*
2 * Copyright (C) 2012 Russell King
3 * Rewritten from the dovefb driver, and Armada510 manuals.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/ctype.h>
10#include <linux/debugfs.h>
11#include <linux/module.h>
12#include <linux/seq_file.h>
13#include <drm/drmP.h>
14#include "armada_crtc.h"
15#include "armada_drm.h"
16
/* debugfs "gem_linear": dump the linear (graphics memory) allocator state. */
static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct armada_private *priv = dev->dev_private;
	int ret;

	/* The drm_mm allocator is protected by struct_mutex. */
	mutex_lock(&dev->struct_mutex);
	ret = drm_mm_dump_table(m, &priv->linear);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
30
31static int armada_debugfs_reg_show(struct seq_file *m, void *data)
32{
33 struct drm_device *dev = m->private;
34 struct armada_private *priv = dev->dev_private;
35 int n, i;
36
37 if (priv) {
38 for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
39 struct armada_crtc *dcrtc = priv->dcrtc[n];
40 if (!dcrtc)
41 continue;
42
43 for (i = 0x84; i <= 0x1c4; i += 4) {
44 uint32_t v = readl_relaxed(dcrtc->base + i);
45 seq_printf(m, "%u: 0x%04x: 0x%08x\n", n, i, v);
46 }
47 }
48 }
49
50 return 0;
51}
52
/* Open the read-only "reg" file: single-shot seq_file dump. */
static int armada_debugfs_reg_r_open(struct inode *inode, struct file *file)
{
	return single_open(file, armada_debugfs_reg_show, inode->i_private);
}
57
/* File operations for the read-only "reg" debugfs entry. */
static const struct file_operations fops_reg_r = {
	.owner = THIS_MODULE,
	.open = armada_debugfs_reg_r_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
65
66static int armada_debugfs_write(struct file *file, const char __user *ptr,
67 size_t len, loff_t *off)
68{
69 struct drm_device *dev = file->private_data;
70 struct armada_private *priv = dev->dev_private;
71 struct armada_crtc *dcrtc = priv->dcrtc[0];
72 char buf[32], *p;
73 uint32_t reg, val;
74 int ret;
75
76 if (*off != 0)
77 return 0;
78
79 if (len > sizeof(buf) - 1)
80 len = sizeof(buf) - 1;
81
82 ret = strncpy_from_user(buf, ptr, len);
83 if (ret < 0)
84 return ret;
85 buf[len] = '\0';
86
87 reg = simple_strtoul(buf, &p, 16);
88 if (!isspace(*p))
89 return -EINVAL;
90 val = simple_strtoul(p + 1, NULL, 16);
91
92 if (reg >= 0x84 && reg <= 0x1c4)
93 writel(val, dcrtc->base + reg);
94
95 return len;
96}
97
/* File operations for the write-only "reg_wr" debugfs entry. */
static const struct file_operations fops_reg_w = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = armada_debugfs_write,
	.llseek = noop_llseek,
};
104
/* Standard drm_info_list entries registered via drm_debugfs_create_files. */
static struct drm_info_list armada_debugfs_list[] = {
	{ "gem_linear", armada_debugfs_gem_linear_show, 0 },
};
#define ARMADA_DEBUGFS_ENTRIES ARRAY_SIZE(armada_debugfs_list)
109
110static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
111 const void *key)
112{
113 struct drm_info_node *node;
114
115 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
116 if (node == NULL) {
117 debugfs_remove(ent);
118 return -ENOMEM;
119 }
120
121 node->minor = minor;
122 node->dent = ent;
123 node->info_ent = (void *) key;
124
125 mutex_lock(&minor->debugfs_lock);
126 list_add(&node->list, &minor->debugfs_list);
127 mutex_unlock(&minor->debugfs_lock);
128
129 return 0;
130}
131
132static int armada_debugfs_create(struct dentry *root, struct drm_minor *minor,
133 const char *name, umode_t mode, const struct file_operations *fops)
134{
135 struct dentry *de;
136
137 de = debugfs_create_file(name, mode, root, minor->dev, fops);
138
139 return drm_add_fake_info_node(minor, de, fops);
140}
141
/*
 * Register the driver's debugfs entries on a DRM minor: the standard
 * info list ("gem_linear") plus the raw register read ("reg") and
 * write ("reg_wr") files.  Later entries are unwound in reverse order
 * on failure.
 */
int armada_drm_debugfs_init(struct drm_minor *minor)
{
	int ret;

	ret = drm_debugfs_create_files(armada_debugfs_list,
				       ARMADA_DEBUGFS_ENTRIES,
				       minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = armada_debugfs_create(minor->debugfs_root, minor,
				   "reg", S_IFREG | S_IRUSR, &fops_reg_r);
	if (ret)
		goto err_1;

	ret = armada_debugfs_create(minor->debugfs_root, minor,
				   "reg_wr", S_IFREG | S_IWUSR, &fops_reg_w);
	if (ret)
		goto err_2;
	return ret;

	/* The fops pointer doubles as the removal key (see
	 * drm_add_fake_info_node), hence the casts below. */
 err_2:
	drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
 err_1:
	drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
				 minor);
	return ret;
}
170
/* Remove all debugfs entries, in reverse order of creation. */
void armada_drm_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_w, 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
	drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
				 minor);
}
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
new file mode 100644
index 000000000000..eef09ec9a5ff
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef ARMADA_DRM_H
9#define ARMADA_DRM_H
10
11#include <linux/kfifo.h>
12#include <linux/io.h>
13#include <linux/workqueue.h>
14#include <drm/drmP.h>
15
16struct armada_crtc;
17struct armada_gem_object;
18struct clk;
19struct drm_fb_helper;
20
21static inline void
22armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr)
23{
24 uint32_t ov, v;
25
26 ov = v = readl_relaxed(ptr);
27 v = (v & ~mask) | val;
28 if (ov != v)
29 writel_relaxed(v, ptr);
30}
31
32static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp)
33{
34 uint32_t pitch = bpp != 4 ? width * ((bpp + 7) / 8) : width / 2;
35
36 /* 88AP510 spec recommends pitch be a multiple of 128 */
37 return ALIGN(pitch, 128);
38}
39
40struct armada_vbl_event {
41 struct list_head node;
42 void *data;
43 void (*fn)(struct armada_crtc *, void *);
44};
45void armada_drm_vbl_event_add(struct armada_crtc *,
46 struct armada_vbl_event *);
47void armada_drm_vbl_event_remove(struct armada_crtc *,
48 struct armada_vbl_event *);
49void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
50 struct armada_vbl_event *);
51#define armada_drm_vbl_event_init(_e, _f, _d) do { \
52 struct armada_vbl_event *__e = _e; \
53 INIT_LIST_HEAD(&__e->node); \
54 __e->data = _d; \
55 __e->fn = _f; \
56} while (0)
57
58
59struct armada_private;
60
/* SoC-variant hooks and capabilities (e.g. Armada 510). */
struct armada_variant {
	bool has_spu_adv_reg;		/* variant has LCD_SPU_ADV_REG */
	uint32_t spu_adv_reg;		/* default value for that register */
	int (*init)(struct armada_private *, struct device *);
	int (*crtc_init)(struct armada_crtc *);
	/* Compute the pixel clock setup for a mode; result via the
	 * uint32_t out-parameter. */
	int (*crtc_compute_clock)(struct armada_crtc *,
				  const struct drm_display_mode *,
				  uint32_t *);
};
70
71/* Variant ops */
72extern const struct armada_variant armada510_ops;
73
/* Per-device driver state, hung off drm_device.dev_private. */
struct armada_private {
	const struct armada_variant *variant;
	/* Deferred framebuffer unreference machinery; see
	 * armada_drm_queue_unref_work() in armada_drv.c. */
	struct work_struct fb_unref_work;
	DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
	struct drm_fb_helper *fbdev;
	struct armada_crtc *dcrtc[2];	/* up to two LCD controllers */
	struct drm_mm linear;		/* graphics memory allocator */
	struct clk *extclk[2];
	/* KMS properties exposed by the driver */
	struct drm_property *csc_yuv_prop;
	struct drm_property *csc_rgb_prop;
	struct drm_property *colorkey_prop;
	struct drm_property *colorkey_min_prop;
	struct drm_property *colorkey_max_prop;
	struct drm_property *colorkey_val_prop;
	struct drm_property *colorkey_alpha_prop;
	struct drm_property *colorkey_mode_prop;
	struct drm_property *brightness_prop;
	struct drm_property *contrast_prop;
	struct drm_property *saturation_prop;
#ifdef CONFIG_DEBUG_FS
	struct dentry *de;
#endif
};
97
98void __armada_drm_queue_unref_work(struct drm_device *,
99 struct drm_framebuffer *);
100void armada_drm_queue_unref_work(struct drm_device *,
101 struct drm_framebuffer *);
102
103extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
104
105int armada_fbdev_init(struct drm_device *);
106void armada_fbdev_fini(struct drm_device *);
107
108int armada_overlay_plane_create(struct drm_device *, unsigned long);
109
110int armada_drm_debugfs_init(struct drm_minor *);
111void armada_drm_debugfs_cleanup(struct drm_minor *);
112
113#endif
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
new file mode 100644
index 000000000000..4f2b28354915
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -0,0 +1,421 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/clk.h>
9#include <linux/module.h>
10#include <drm/drmP.h>
11#include <drm/drm_crtc_helper.h>
12#include "armada_crtc.h"
13#include "armada_drm.h"
14#include "armada_gem.h"
15#include "armada_hw.h"
16#include <drm/armada_drm.h>
17#include "armada_ioctlP.h"
18
19#ifdef CONFIG_DRM_ARMADA_TDA1998X
20#include <drm/i2c/tda998x.h>
21#include "armada_slave.h"
22
23static struct tda998x_encoder_params params = {
24 /* With 0x24, there is no translation between vp_out and int_vp
25 FB LCD out Pins VIP Int Vp
26 R:23:16 R:7:0 VPC7:0 7:0 7:0[R]
27 G:15:8 G:15:8 VPB7:0 23:16 23:16[G]
28 B:7:0 B:23:16 VPA7:0 15:8 15:8[B]
29 */
30 .swap_a = 2,
31 .swap_b = 3,
32 .swap_c = 4,
33 .swap_d = 5,
34 .swap_e = 0,
35 .swap_f = 1,
36 .audio_cfg = BIT(2),
37 .audio_frame[1] = 1,
38 .audio_format = AFMT_SPDIF,
39 .audio_sample_rate = 44100,
40};
41
42static const struct armada_drm_slave_config tda19988_config = {
43 .i2c_adapter_id = 0,
44 .crtcs = 1 << 0, /* Only LCD0 at the moment */
45 .polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
46 .interlace_allowed = true,
47 .info = {
48 .type = "tda998x",
49 .addr = 0x70,
50 .platform_data = &params,
51 },
52};
53#endif
54
/*
 * Workqueue callback: drop every framebuffer reference queued on the
 * fb_unref kfifo (queued from contexts that cannot unreference
 * directly, e.g. under event_lock).
 */
static void armada_drm_unref_work(struct work_struct *work)
{
	struct armada_private *priv =
		container_of(work, struct armada_private, fb_unref_work);
	struct drm_framebuffer *fb;

	while (kfifo_get(&priv->fb_unref, &fb))
		drm_framebuffer_unreference(fb);
}
64
/*
 * Queue a framebuffer for deferred unreference on the system
 * workqueue.  Must be called with dev->event_lock held.
 */
void __armada_drm_queue_unref_work(struct drm_device *dev,
	struct drm_framebuffer *fb)
{
	struct armada_private *priv = dev->dev_private;

	/*
	 * kfifo_put() on a pointer-typed fifo wants a pointer *to* the
	 * pointer being stored, and const-qualifies the pointee; the
	 * intermediate variable satisfies both requirements.
	 */
	const struct drm_framebuffer *silly_api_alert = fb;
	WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
	schedule_work(&priv->fb_unref_work);
}
82
/* Lock-taking wrapper around __armada_drm_queue_unref_work(). */
void armada_drm_queue_unref_work(struct drm_device *dev,
	struct drm_framebuffer *fb)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	__armada_drm_queue_unref_work(dev, fb);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
92
/*
 * DRM driver .load callback: claim platform resources, initialise the
 * variant, set up mode-setting, create the CRTCs, vblank/IRQ handling
 * and the fbdev emulation.  Resources <= 64K are LCD register windows
 * (one per CRTC); the single larger resource is graphics memory.
 */
static int armada_drm_load(struct drm_device *dev, unsigned long flags)
{
	const struct platform_device_id *id;
	struct armada_private *priv;
	struct resource *res[ARRAY_SIZE(priv->dcrtc)];
	struct resource *mem = NULL;
	int ret, n, i;

	memset(res, 0, sizeof(res));

	for (n = i = 0; ; n++) {
		struct resource *r = platform_get_resource(dev->platformdev,
							   IORESOURCE_MEM, n);
		if (!r)
			break;

		/* Resources above 64K are graphics memory */
		if (resource_size(r) > SZ_64K)
			mem = r;
		else if (i < ARRAY_SIZE(priv->dcrtc))
			res[i++] = r;
		else
			return -EINVAL;
	}

	/* Need at least one register window and the graphics memory. */
	if (!res[0] || !mem)
		return -ENXIO;

	if (!devm_request_mem_region(dev->dev, mem->start,
			resource_size(mem), "armada-drm"))
		return -EBUSY;

	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate private\n");
		return -ENOMEM;
	}

	dev->dev_private = priv;

	/* Get the implementation specific driver data. */
	id = platform_get_device_id(dev->platformdev);
	if (!id)
		return -ENXIO;

	priv->variant = (struct armada_variant *)id->driver_data;

	ret = priv->variant->init(priv, dev->dev);
	if (ret)
		return ret;

	INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
	INIT_KFIFO(priv->fb_unref);

	/* Mode setting support */
	drm_mode_config_init(dev);
	dev->mode_config.min_width = 320;
	dev->mode_config.min_height = 200;

	/*
	 * With vscale enabled, the maximum width is 1920 due to the
	 * 1920 by 3 lines RAM
	 */
	dev->mode_config.max_width = 1920;
	dev->mode_config.max_height = 2048;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.funcs = &armada_drm_mode_config_funcs;
	drm_mm_init(&priv->linear, mem->start, resource_size(mem));

	/* Create all LCD controllers */
	for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
		if (!res[n])
			break;

		ret = armada_drm_crtc_create(dev, n, res[n]);
		if (ret)
			goto err_kms;
	}

#ifdef CONFIG_DRM_ARMADA_TDA1998X
	ret = armada_drm_connector_slave_create(dev, &tda19988_config);
	if (ret)
		goto err_kms;
#endif

	/* "n" is the number of CRTCs created by the loop above. */
	ret = drm_vblank_init(dev, n);
	if (ret)
		goto err_kms;

	ret = drm_irq_install(dev);
	if (ret)
		goto err_kms;

	dev->vblank_disable_allowed = 1;

	ret = armada_fbdev_init(dev);
	if (ret)
		goto err_irq;

	drm_kms_helper_poll_init(dev);

	return 0;

	/* Unwind in reverse order of setup. */
 err_irq:
	drm_irq_uninstall(dev);
 err_kms:
	drm_mode_config_cleanup(dev);
	drm_mm_takedown(&priv->linear);
	flush_work(&priv->fb_unref_work);

	return ret;
}
206
/*
 * DRM driver .unload callback: tear down in the reverse order of
 * armada_drm_load(), flushing any pending framebuffer unref work.
 */
static int armada_drm_unload(struct drm_device *dev)
{
	struct armada_private *priv = dev->dev_private;

	drm_kms_helper_poll_fini(dev);
	armada_fbdev_fini(dev);
	drm_irq_uninstall(dev);
	drm_mode_config_cleanup(dev);
	drm_mm_takedown(&priv->linear);
	flush_work(&priv->fb_unref_work);
	dev->dev_private = NULL;

	return 0;
}
221
/*
 * Queue a callback to run at the next vblank on this CRTC.  Takes a
 * vblank reference while the event is queued; adding an already
 * queued event is a no-op.
 */
void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
	struct armada_vbl_event *evt)
{
	unsigned long flags;

	spin_lock_irqsave(&dcrtc->irq_lock, flags);
	if (list_empty(&evt->node)) {
		list_add_tail(&evt->node, &dcrtc->vbl_list);

		drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
	}
	spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}
235
/*
 * Dequeue a vblank event and drop its vblank reference.  The caller
 * must hold dcrtc->irq_lock (see the _unlocked variant below).
 */
void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
	struct armada_vbl_event *evt)
{
	if (!list_empty(&evt->node)) {
		list_del_init(&evt->node);
		drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
	}
}
244
/* Lock-taking wrapper around armada_drm_vbl_event_remove(). */
void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *dcrtc,
	struct armada_vbl_event *evt)
{
	unsigned long flags;

	spin_lock_irqsave(&dcrtc->irq_lock, flags);
	armada_drm_vbl_event_remove(dcrtc, evt);
	spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}
254
/* These are called under the vbl_lock. */
/* DRM .enable_vblank: unmask the vsync interrupt for this CRTC. */
static int armada_drm_enable_vblank(struct drm_device *dev, int crtc)
{
	struct armada_private *priv = dev->dev_private;
	armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
	return 0;
}
262
/* DRM .disable_vblank: mask the vsync interrupt for this CRTC. */
static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
{
	struct armada_private *priv = dev->dev_private;
	armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
}
268
/*
 * Top-level interrupt handler.  Only the first CRTC's interrupt is
 * serviced here; events are dispatched to armada_drm_crtc_irq().
 */
static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct armada_private *priv = dev->dev_private;
	struct armada_crtc *dcrtc = priv->dcrtc[0];
	uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
	irqreturn_t handled = IRQ_NONE;

	/*
	 * This is ridiculous - rather than writing bits to clear, we
	 * have to set the actual status register value. This is racy.
	 */
	writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);

	/* Mask out those interrupts we haven't enabled */
	v = stat & dcrtc->irq_ena;

	if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
		armada_drm_crtc_irq(dcrtc, stat);
		handled = IRQ_HANDLED;
	}

	return handled;
}
293
/*
 * DRM .irq_postinstall: program the currently enabled interrupt
 * sources and clear any stale status, under the vblank lock.
 */
static int armada_drm_irq_postinstall(struct drm_device *dev)
{
	struct armada_private *priv = dev->dev_private;
	struct armada_crtc *dcrtc = priv->dcrtc[0];

	spin_lock_irq(&dev->vbl_lock);
	writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
	writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
	spin_unlock_irq(&dev->vbl_lock);

	return 0;
}
306
/* DRM .irq_uninstall: mask every interrupt source on the first CRTC. */
static void armada_drm_irq_uninstall(struct drm_device *dev)
{
	struct armada_private *priv = dev->dev_private;
	struct armada_crtc *dcrtc = priv->dcrtc[0];

	writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
}
314
315static struct drm_ioctl_desc armada_ioctls[] = {
316 DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
317 DRM_UNLOCKED),
318 DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl,
319 DRM_UNLOCKED),
320 DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
321 DRM_UNLOCKED),
322};
323
324static const struct file_operations armada_drm_fops = {
325 .owner = THIS_MODULE,
326 .llseek = no_llseek,
327 .read = drm_read,
328 .poll = drm_poll,
329 .unlocked_ioctl = drm_ioctl,
330 .mmap = drm_gem_mmap,
331 .open = drm_open,
332 .release = drm_release,
333};
334
/*
 * DRM driver descriptor: GEM + KMS with driver-managed IRQ and
 * dma-buf PRIME import/export.  num_ioctls is filled in at module
 * init (see armada_drm_init).
 */
static struct drm_driver armada_drm_driver = {
	.load			= armada_drm_load,
	.open			= NULL,
	.preclose		= NULL,
	.postclose		= NULL,
	.lastclose		= NULL,
	.unload			= armada_drm_unload,
	.get_vblank_counter	= drm_vblank_count,
	.enable_vblank		= armada_drm_enable_vblank,
	.disable_vblank		= armada_drm_disable_vblank,
	.irq_handler		= armada_drm_irq_handler,
	.irq_postinstall	= armada_drm_irq_postinstall,
	.irq_uninstall		= armada_drm_irq_uninstall,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init		= armada_drm_debugfs_init,
	.debugfs_cleanup	= armada_drm_debugfs_cleanup,
#endif
	.gem_free_object	= armada_gem_free_object,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_export	= armada_gem_prime_export,
	.gem_prime_import	= armada_gem_prime_import,
	.dumb_create		= armada_gem_dumb_create,
	.dumb_map_offset	= armada_gem_dumb_map_offset,
	.dumb_destroy		= armada_gem_dumb_destroy,
	.gem_vm_ops		= &armada_gem_vm_ops,
	.major			= 1,
	.minor			= 0,
	.name			= "armada-drm",
	.desc			= "Armada SoC DRM",
	.date			= "20120730",
	.driver_features	= DRIVER_GEM | DRIVER_MODESET |
				  DRIVER_HAVE_IRQ | DRIVER_PRIME,
	.ioctls			= armada_ioctls,
	.fops			= &armada_drm_fops,
};
371
/* Platform driver probe: hand over to the DRM platform helper. */
static int armada_drm_probe(struct platform_device *pdev)
{
	return drm_platform_init(&armada_drm_driver, pdev);
}
376
/* Platform driver remove: tear down the DRM device. */
static int armada_drm_remove(struct platform_device *pdev)
{
	drm_platform_exit(&armada_drm_driver, pdev);
	return 0;
}
382
383static const struct platform_device_id armada_drm_platform_ids[] = {
384 {
385 .name = "armada-drm",
386 .driver_data = (unsigned long)&armada510_ops,
387 }, {
388 .name = "armada-510-drm",
389 .driver_data = (unsigned long)&armada510_ops,
390 },
391 { },
392};
393MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
394
395static struct platform_driver armada_drm_platform_driver = {
396 .probe = armada_drm_probe,
397 .remove = armada_drm_remove,
398 .driver = {
399 .name = "armada-drm",
400 .owner = THIS_MODULE,
401 },
402 .id_table = armada_drm_platform_ids,
403};
404
/* Module init: finalise the ioctl count, then register the driver. */
static int __init armada_drm_init(void)
{
	armada_drm_driver.num_ioctls = DRM_ARRAY_SIZE(armada_ioctls);
	return platform_driver_register(&armada_drm_platform_driver);
}
module_init(armada_drm_init);
411
/* Module exit: unregister the platform driver. */
static void __exit armada_drm_exit(void)
{
	platform_driver_unregister(&armada_drm_platform_driver);
}
module_exit(armada_drm_exit);
417
418MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
419MODULE_DESCRIPTION("Armada DRM Driver");
420MODULE_LICENSE("GPL");
421MODULE_ALIAS("platform:armada-drm");
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
new file mode 100644
index 000000000000..1c90969def3e
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -0,0 +1,170 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <drm/drmP.h>
9#include <drm/drm_crtc_helper.h>
10#include <drm/drm_fb_helper.h>
11#include "armada_drm.h"
12#include "armada_fb.h"
13#include "armada_gem.h"
14#include "armada_hw.h"
15
/*
 * Framebuffer .destroy: release the framebuffer's own reference on
 * the backing GEM object (taken in armada_framebuffer_create) and
 * free the wrapper.
 */
static void armada_fb_destroy(struct drm_framebuffer *fb)
{
	struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);

	drm_framebuffer_cleanup(&dfb->fb);
	drm_gem_object_unreference_unlocked(&dfb->obj->obj);
	kfree(dfb);
}
24
/* Framebuffer .create_handle: expose the backing GEM object to userspace. */
static int armada_fb_create_handle(struct drm_framebuffer *fb,
	struct drm_file *dfile, unsigned int *handle)
{
	struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
	return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
}
31
32static const struct drm_framebuffer_funcs armada_fb_funcs = {
33 .destroy = armada_fb_destroy,
34 .create_handle = armada_fb_create_handle,
35};
36
/*
 * Wrap a GEM object in an armada_framebuffer, translating the DRM
 * pixel format into the hardware format/modifier register bits.
 * Returns an ERR_PTR on unsupported format, allocation failure or
 * drm_framebuffer_init() error.
 *
 * Reference semantics: the caller's reference on "obj" covers this
 * call; on success the framebuffer takes an additional reference of
 * its own (dropped in armada_fb_destroy).  On failure no reference is
 * consumed - the caller must still drop its own.
 */
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
	struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
{
	struct armada_framebuffer *dfb;
	uint8_t format, config;
	int ret;

	/* Map DRM fourcc -> hardware CFG_* format and swap/CSC bits. */
	switch (mode->pixel_format) {
#define FMT(drm, fmt, mod)		\
	case DRM_FORMAT_##drm:		\
		format = CFG_##fmt;	\
		config = mod;		\
		break
	FMT(RGB565,	565,		CFG_SWAPRB);
	FMT(BGR565,	565,		0);
	FMT(ARGB1555,	1555,		CFG_SWAPRB);
	FMT(ABGR1555,	1555,		0);
	FMT(RGB888,	888PACK,	CFG_SWAPRB);
	FMT(BGR888,	888PACK,	0);
	FMT(XRGB8888,	X888,		CFG_SWAPRB);
	FMT(XBGR8888,	X888,		0);
	FMT(ARGB8888,	8888,		CFG_SWAPRB);
	FMT(ABGR8888,	8888,		0);
	FMT(YUYV,	422PACK,	CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV);
	FMT(UYVY,	422PACK,	CFG_YUV2RGB);
	FMT(VYUY,	422PACK,	CFG_YUV2RGB | CFG_SWAPUV);
	FMT(YVYU,	422PACK,	CFG_YUV2RGB | CFG_SWAPYU);
	FMT(YUV422,	422,		CFG_YUV2RGB);
	FMT(YVU422,	422,		CFG_YUV2RGB | CFG_SWAPUV);
	FMT(YUV420,	420,		CFG_YUV2RGB);
	FMT(YVU420,	420,		CFG_YUV2RGB | CFG_SWAPUV);
	FMT(C8,		PSEUDO8,	0);
#undef FMT
	default:
		return ERR_PTR(-EINVAL);
	}

	dfb = kzalloc(sizeof(*dfb), GFP_KERNEL);
	if (!dfb) {
		DRM_ERROR("failed to allocate Armada fb object\n");
		return ERR_PTR(-ENOMEM);
	}

	dfb->fmt = format;
	dfb->mod = config;
	dfb->obj = obj;

	drm_helper_mode_fill_fb_struct(&dfb->fb, mode);

	ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
	if (ret) {
		kfree(dfb);
		return ERR_PTR(ret);
	}

	/*
	 * Take a reference on our object as we're successful - the
	 * caller already holds a reference, which keeps us safe for
	 * the above call, but the caller will drop their reference
	 * to it. Hence we need to take our own reference.
	 */
	drm_gem_object_reference(&obj->obj);

	return dfb;
}
102
103static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
104 struct drm_file *dfile, struct drm_mode_fb_cmd2 *mode)
105{
106 struct armada_gem_object *obj;
107 struct armada_framebuffer *dfb;
108 int ret;
109
110 DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n",
111 mode->width, mode->height, mode->pixel_format,
112 mode->flags, mode->pitches[0], mode->pitches[1],
113 mode->pitches[2]);
114
115 /* We can only handle a single plane at the moment */
116 if (drm_format_num_planes(mode->pixel_format) > 1 &&
117 (mode->handles[0] != mode->handles[1] ||
118 mode->handles[0] != mode->handles[2])) {
119 ret = -EINVAL;
120 goto err;
121 }
122
123 obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]);
124 if (!obj) {
125 ret = -ENOENT;
126 goto err;
127 }
128
129 if (obj->obj.import_attach && !obj->sgt) {
130 ret = armada_gem_map_import(obj);
131 if (ret)
132 goto err_unref;
133 }
134
135 /* Framebuffer objects must have a valid device address for scanout */
136 if (obj->dev_addr == DMA_ERROR_CODE) {
137 ret = -EINVAL;
138 goto err_unref;
139 }
140
141 dfb = armada_framebuffer_create(dev, mode, obj);
142 if (IS_ERR(dfb)) {
143 ret = PTR_ERR(dfb);
144 goto err;
145 }
146
147 drm_gem_object_unreference_unlocked(&obj->obj);
148
149 return &dfb->fb;
150
151 err_unref:
152 drm_gem_object_unreference_unlocked(&obj->obj);
153 err:
154 DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
155 return ERR_PTR(ret);
156}
157
158static void armada_output_poll_changed(struct drm_device *dev)
159{
160 struct armada_private *priv = dev->dev_private;
161 struct drm_fb_helper *fbh = priv->fbdev;
162
163 if (fbh)
164 drm_fb_helper_hotplug_event(fbh);
165}
166
167const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
168 .fb_create = armada_fb_create,
169 .output_poll_changed = armada_output_poll_changed,
170};
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
new file mode 100644
index 000000000000..ce3f12ebfc53
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -0,0 +1,24 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef ARMADA_FB_H
9#define ARMADA_FB_H
10
11struct armada_framebuffer {
12 struct drm_framebuffer fb;
13 struct armada_gem_object *obj;
14 uint8_t fmt;
15 uint8_t mod;
16};
17#define drm_fb_to_armada_fb(dfb) \
18 container_of(dfb, struct armada_framebuffer, fb)
19#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
20
21struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
22 struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
23
24#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
new file mode 100644
index 000000000000..dd5ea77dac96
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -0,0 +1,202 @@
1/*
2 * Copyright (C) 2012 Russell King
3 * Written from the i915 driver.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/errno.h>
10#include <linux/fb.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13
14#include <drm/drmP.h>
15#include <drm/drm_fb_helper.h>
16#include "armada_crtc.h"
17#include "armada_drm.h"
18#include "armada_fb.h"
19#include "armada_gem.h"
20
21static /*const*/ struct fb_ops armada_fb_ops = {
22 .owner = THIS_MODULE,
23 .fb_check_var = drm_fb_helper_check_var,
24 .fb_set_par = drm_fb_helper_set_par,
25 .fb_fillrect = cfb_fillrect,
26 .fb_copyarea = cfb_copyarea,
27 .fb_imageblit = cfb_imageblit,
28 .fb_pan_display = drm_fb_helper_pan_display,
29 .fb_blank = drm_fb_helper_blank,
30 .fb_setcmap = drm_fb_helper_setcmap,
31 .fb_debug_enter = drm_fb_helper_debug_enter,
32 .fb_debug_leave = drm_fb_helper_debug_leave,
33};
34
/*
 * Allocate and map the fbdev framebuffer: a linear-backed GEM object
 * wrapped in an armada_framebuffer, plus the fb_info describing it to
 * the fbdev layer.
 */
static int armada_fb_create(struct drm_fb_helper *fbh,
	struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fbh->dev;
	struct drm_mode_fb_cmd2 mode;
	struct armada_framebuffer *dfb;
	struct armada_gem_object *obj;
	struct fb_info *info;
	int size, ret;
	void *ptr;

	memset(&mode, 0, sizeof(mode));
	mode.width = sizes->surface_width;
	mode.height = sizes->surface_height;
	mode.pitches[0] = armada_pitch(mode.width, sizes->surface_bpp);
	mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
					sizes->surface_depth);

	size = mode.pitches[0] * mode.height;
	obj = armada_gem_alloc_private_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate fb memory\n");
		return -ENOMEM;
	}

	/* Back the object with linear (contiguous) graphics memory. */
	ret = armada_gem_linear_back(dev, obj);
	if (ret) {
		drm_gem_object_unreference_unlocked(&obj->obj);
		return ret;
	}

	ptr = armada_gem_map_object(dev, obj);
	if (!ptr) {
		drm_gem_object_unreference_unlocked(&obj->obj);
		return -ENOMEM;
	}

	dfb = armada_framebuffer_create(dev, &mode, obj);

	/*
	 * A reference is now held by the framebuffer object if
	 * successful, otherwise this drops the ref for the error path.
	 */
	drm_gem_object_unreference_unlocked(&obj->obj);

	if (IS_ERR(dfb))
		return PTR_ERR(dfb);

	info = framebuffer_alloc(0, dev->dev);
	if (!info) {
		ret = -ENOMEM;
		goto err_fballoc;
	}

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto err_fbcmap;
	}

	/* Describe the framebuffer to the fbdev layer. */
	strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
	info->par = fbh;
	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &armada_fb_ops;
	info->fix.smem_start = obj->phys_addr;
	info->fix.smem_len = obj->obj.size;
	info->screen_size = obj->obj.size;
	info->screen_base = ptr;
	fbh->fb = &dfb->fb;
	fbh->fbdev = info;
	drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
	drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);

	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
		dfb->fb.width, dfb->fb.height,
		dfb->fb.bits_per_pixel, obj->phys_addr);

	return 0;

 err_fbcmap:
	framebuffer_release(info);
 err_fballoc:
	/* Destroying the fb drops its reference on the GEM object. */
	dfb->fb.funcs->destroy(&dfb->fb);
	return ret;
}
120
121static int armada_fb_probe(struct drm_fb_helper *fbh,
122 struct drm_fb_helper_surface_size *sizes)
123{
124 int ret = 0;
125
126 if (!fbh->fb) {
127 ret = armada_fb_create(fbh, sizes);
128 if (ret == 0)
129 ret = 1;
130 }
131 return ret;
132}
133
134static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
135 .gamma_set = armada_drm_crtc_gamma_set,
136 .gamma_get = armada_drm_crtc_gamma_get,
137 .fb_probe = armada_fb_probe,
138};
139
/*
 * Set up fbdev emulation: allocate the fb helper, bind it to all
 * connectors and create the initial 32bpp configuration.
 */
int armada_fbdev_init(struct drm_device *dev)
{
	struct armada_private *priv = dev->dev_private;
	struct drm_fb_helper *fbh;
	int ret;

	fbh = devm_kzalloc(dev->dev, sizeof(*fbh), GFP_KERNEL);
	if (!fbh)
		return -ENOMEM;

	priv->fbdev = fbh;

	fbh->funcs = &armada_fb_helper_funcs;

	/* One CRTC, one connector for the emulated console. */
	ret = drm_fb_helper_init(dev, fbh, 1, 1);
	if (ret) {
		DRM_ERROR("failed to initialize drm fb helper\n");
		goto err_fb_helper;
	}

	ret = drm_fb_helper_single_add_all_connectors(fbh);
	if (ret) {
		DRM_ERROR("failed to add fb connectors\n");
		goto err_fb_setup;
	}

	ret = drm_fb_helper_initial_config(fbh, 32);
	if (ret) {
		DRM_ERROR("failed to set initial config\n");
		goto err_fb_setup;
	}

	return 0;
 err_fb_setup:
	drm_fb_helper_fini(fbh);
 err_fb_helper:
	priv->fbdev = NULL;
	return ret;
}
179
/*
 * Tear down fbdev emulation: unregister and free the fb_info, destroy
 * the emulated framebuffer, then finalise the helper.  Safe to call
 * when init failed (priv->fbdev is NULL).
 */
void armada_fbdev_fini(struct drm_device *dev)
{
	struct armada_private *priv = dev->dev_private;
	struct drm_fb_helper *fbh = priv->fbdev;

	if (fbh) {
		struct fb_info *info = fbh->fbdev;

		if (info) {
			unregister_framebuffer(info);
			if (info->cmap.len)
				fb_dealloc_cmap(&info->cmap);
			framebuffer_release(info);
		}

		if (fbh->fb)
			fbh->fb->funcs->destroy(fbh->fb);

		drm_fb_helper_fini(fbh);

		/* fbh itself is devm-allocated; just forget it. */
		priv->fbdev = NULL;
	}
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
new file mode 100644
index 000000000000..9f2356bae7fd
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -0,0 +1,611 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/dma-buf.h>
9#include <linux/dma-mapping.h>
10#include <linux/shmem_fs.h>
11#include <drm/drmP.h>
12#include "armada_drm.h"
13#include "armada_gem.h"
14#include <drm/armada_drm.h>
15#include "armada_ioctlP.h"
16
17static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
18{
19 struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
20 unsigned long addr = (unsigned long)vmf->virtual_address;
21 unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
22 int ret;
23
24 pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
25 ret = vm_insert_pfn(vma, addr, pfn);
26
27 switch (ret) {
28 case 0:
29 case -EBUSY:
30 return VM_FAULT_NOPAGE;
31 case -ENOMEM:
32 return VM_FAULT_OOM;
33 default:
34 return VM_FAULT_SIGBUS;
35 }
36}
37
/* VM operations for mmap'd GEM objects; open/close keep the GEM refcount */
const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};
43
44static size_t roundup_gem_size(size_t size)
45{
46 return roundup(size, PAGE_SIZE);
47}
48
/*
 * Final release of a GEM object.  Frees whichever backing store the
 * object owns: a system page allocation (->page), a node in the linear
 * memory pool (->linear, plus its ioremap), or an imported dma_buf
 * (->import_attach/->sgt).
 *
 * dev->struct_mutex is held here.
 */
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		drm_mm_remove_node(dobj->linear);
		kfree(dobj->linear);
		/* ->addr is only set for linear objects that were mapped */
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}
81
/*
 * Give an object physically-contiguous backing store suitable for
 * scanout: small objects come from the page allocator, everything
 * else from the device's linear memory pool.  No-op when the object
 * is already backed.  Returns 0 or a negative errno.
 */
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	/* Already backed - nothing to do */
	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handing
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
		/*
		 * NOTE(review): if alloc_pages() fails we fall through to
		 * the linear allocator below instead of returning an error.
		 */
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with an CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;
		/* NOTE(review): -ENOSPC for a kzalloc() failure looks odd;
		 * -ENOMEM would be the conventional code - confirm intent.
		 */

		/* priv->linear is protected by dev->struct_mutex */
		mutex_lock(&dev->struct_mutex);
		ret = drm_mm_insert_node(&priv->linear, node, size, align,
					 DRM_MM_SEARCH_DEFAULT);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&dev->struct_mutex);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&dev->struct_mutex);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		/* Linear pool addresses are both CPU-physical and
		 * device-visible, so they serve as both addresses here.
		 */
		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
	}

	/* NOTE(review): %#x truncates 64-bit phys_addr_t/resource_size_t;
	 * confirm addresses fit in 32 bits on supported platforms.
	 */
	DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
			 obj, obj->phys_addr, obj->dev_addr);

	return 0;
}
180
181void *
182armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
183{
184 /* only linear objects need to be ioremap'd */
185 if (!dobj->addr && dobj->linear)
186 dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
187 return dobj->addr;
188}
189
190struct armada_gem_object *
191armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
192{
193 struct armada_gem_object *obj;
194
195 size = roundup_gem_size(size);
196
197 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
198 if (!obj)
199 return NULL;
200
201 drm_gem_private_object_init(dev, &obj->obj, size);
202 obj->dev_addr = DMA_ERROR_CODE;
203
204 DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
205
206 return obj;
207}
208
209struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
210 size_t size)
211{
212 struct armada_gem_object *obj;
213 struct address_space *mapping;
214
215 size = roundup_gem_size(size);
216
217 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
218 if (!obj)
219 return NULL;
220
221 if (drm_gem_object_init(dev, &obj->obj, size)) {
222 kfree(obj);
223 return NULL;
224 }
225
226 obj->dev_addr = DMA_ERROR_CODE;
227
228 mapping = obj->obj.filp->f_path.dentry->d_inode->i_mapping;
229 mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
230
231 DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
232
233 return obj;
234}
235
236/* Dumb alloc support */
237int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
238 struct drm_mode_create_dumb *args)
239{
240 struct armada_gem_object *dobj;
241 u32 handle;
242 size_t size;
243 int ret;
244
245 args->pitch = armada_pitch(args->width, args->bpp);
246 args->size = size = args->pitch * args->height;
247
248 dobj = armada_gem_alloc_private_object(dev, size);
249 if (dobj == NULL)
250 return -ENOMEM;
251
252 ret = armada_gem_linear_back(dev, dobj);
253 if (ret)
254 goto err;
255
256 ret = drm_gem_handle_create(file, &dobj->obj, &handle);
257 if (ret)
258 goto err;
259
260 args->handle = handle;
261
262 /* drop reference from allocate - handle holds it now */
263 DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
264 err:
265 drm_gem_object_unreference_unlocked(&dobj->obj);
266 return ret;
267}
268
269int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
270 uint32_t handle, uint64_t *offset)
271{
272 struct armada_gem_object *obj;
273 int ret = 0;
274
275 mutex_lock(&dev->struct_mutex);
276 obj = armada_gem_object_lookup(dev, file, handle);
277 if (!obj) {
278 DRM_ERROR("failed to lookup gem object\n");
279 ret = -EINVAL;
280 goto err_unlock;
281 }
282
283 /* Don't allow imported objects to be mapped */
284 if (obj->obj.import_attach) {
285 ret = -EINVAL;
286 goto err_unlock;
287 }
288
289 ret = drm_gem_create_mmap_offset(&obj->obj);
290 if (ret == 0) {
291 *offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
292 DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
293 }
294
295 drm_gem_object_unreference(&obj->obj);
296 err_unlock:
297 mutex_unlock(&dev->struct_mutex);
298
299 return ret;
300}
301
/* Destroy a dumb buffer handle; the object is freed when the last
 * reference (handle, mmap, prime) goes away. */
int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
	uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
307
308/* Private driver gem ioctls */
309int armada_gem_create_ioctl(struct drm_device *dev, void *data,
310 struct drm_file *file)
311{
312 struct drm_armada_gem_create *args = data;
313 struct armada_gem_object *dobj;
314 size_t size;
315 u32 handle;
316 int ret;
317
318 if (args->size == 0)
319 return -ENOMEM;
320
321 size = args->size;
322
323 dobj = armada_gem_alloc_object(dev, size);
324 if (dobj == NULL)
325 return -ENOMEM;
326
327 ret = drm_gem_handle_create(file, &dobj->obj, &handle);
328 if (ret)
329 goto err;
330
331 args->handle = handle;
332
333 /* drop reference from allocate - handle holds it now */
334 DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
335 err:
336 drm_gem_object_unreference_unlocked(&dobj->obj);
337 return ret;
338}
339
340/* Map a shmem-backed object into process memory space */
341int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
342 struct drm_file *file)
343{
344 struct drm_armada_gem_mmap *args = data;
345 struct armada_gem_object *dobj;
346 unsigned long addr;
347
348 dobj = armada_gem_object_lookup(dev, file, args->handle);
349 if (dobj == NULL)
350 return -ENOENT;
351
352 if (!dobj->obj.filp) {
353 drm_gem_object_unreference(&dobj->obj);
354 return -EINVAL;
355 }
356
357 addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
358 MAP_SHARED, args->offset);
359 drm_gem_object_unreference(&dobj->obj);
360 if (IS_ERR_VALUE(addr))
361 return addr;
362
363 args->addr = addr;
364
365 return 0;
366}
367
368int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
369 struct drm_file *file)
370{
371 struct drm_armada_gem_pwrite *args = data;
372 struct armada_gem_object *dobj;
373 char __user *ptr;
374 int ret;
375
376 DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
377 args->handle, args->offset, args->size, args->ptr);
378
379 if (args->size == 0)
380 return 0;
381
382 ptr = (char __user *)(uintptr_t)args->ptr;
383
384 if (!access_ok(VERIFY_READ, ptr, args->size))
385 return -EFAULT;
386
387 ret = fault_in_multipages_readable(ptr, args->size);
388 if (ret)
389 return ret;
390
391 dobj = armada_gem_object_lookup(dev, file, args->handle);
392 if (dobj == NULL)
393 return -ENOENT;
394
395 /* Must be a kernel-mapped object */
396 if (!dobj->addr)
397 return -EINVAL;
398
399 if (args->offset > dobj->obj.size ||
400 args->size > dobj->obj.size - args->offset) {
401 DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
402 ret = -EINVAL;
403 goto unref;
404 }
405
406 if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
407 ret = -EFAULT;
408 } else if (dobj->update) {
409 dobj->update(dobj->update_data);
410 ret = 0;
411 }
412
413 unref:
414 drm_gem_object_unreference_unlocked(&dobj->obj);
415 return ret;
416}
417
/* Prime support */
/*
 * Build and DMA-map a scatter/gather table for an exported object.
 * Three backing types are handled:
 *  - shmem: one sg entry per page, with a page reference taken on each;
 *  - page:  a single contiguous allocation mapped as one entry;
 *  - linear: no struct pages exist, so the DMA address/length are
 *    filled in directly without calling dma_map_sg().
 * Returns NULL on any failure, releasing everything acquired so far.
 */
struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		gfp_t gfp;
		int count;

		/* object size is page-aligned, so this is exact */
		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = file_inode(dobj->obj.filp)->i_mapping;
		gfp = mapping_gfp_mask(mapping);

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (IS_ERR(page)) {
				/* release only the pages obtained so far */
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		/* object has no backing store at all */
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		page_cache_release(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}
490
491static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
492 struct sg_table *sgt, enum dma_data_direction dir)
493{
494 struct drm_gem_object *obj = attach->dmabuf->priv;
495 struct armada_gem_object *dobj = drm_to_armada_gem(obj);
496 int i;
497
498 if (!dobj->linear)
499 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
500
501 if (dobj->obj.filp) {
502 struct scatterlist *sg;
503 for_each_sg(sgt->sgl, sg, sgt->nents, i)
504 page_cache_release(sg_page(sg));
505 }
506
507 sg_free_table(sgt);
508 kfree(sgt);
509}
510
/* CPU kmap access to the exported dma_buf is not supported */
static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

/* Userspace mmap of the exported dma_buf is not supported either */
static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

/* dma_buf ops for buffers exported by this driver (DMA mapping only) */
static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf = armada_gem_prime_map_dma_buf,
	.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap_atomic = armada_gem_dmabuf_no_kmap,
	.kunmap_atomic = armada_gem_dmabuf_no_kunmap,
	.kmap = armada_gem_dmabuf_no_kmap,
	.kunmap = armada_gem_dmabuf_no_kunmap,
	.mmap = armada_gem_dmabuf_mmap,
};
537
/* Export a GEM object as a dma_buf.  @flags is ignored; the buffer is
 * always exported O_RDWR. */
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
		O_RDWR);
}
545
/*
 * Import a dma_buf as a GEM object.  If the buffer is one of our own
 * exports for this device, short-circuit and return the original
 * object with an extra reference; otherwise attach to the dma_buf and
 * wrap it in a new private object.  The scatterlist is mapped lazily
 * via armada_gem_map_import().
 */
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_reference(obj);
			/* drop the dma_buf reference the caller passed in */
			dma_buf_put(buf);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}
584
585int armada_gem_map_import(struct armada_gem_object *dobj)
586{
587 int ret;
588
589 dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
590 DMA_TO_DEVICE);
591 if (!dobj->sgt) {
592 DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
593 return -EINVAL;
594 }
595 if (IS_ERR(dobj->sgt)) {
596 ret = PTR_ERR(dobj->sgt);
597 dobj->sgt = NULL;
598 DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
599 return ret;
600 }
601 if (dobj->sgt->nents > 1) {
602 DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
603 return -EINVAL;
604 }
605 if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
606 DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
607 return -EINVAL;
608 }
609 dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
610 return 0;
611}
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
new file mode 100644
index 000000000000..00b6cd461a03
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef ARMADA_GEM_H
9#define ARMADA_GEM_H
10
/* GEM */
/*
 * Driver GEM object.  Backing store is one of:
 *  - page:   a system page allocation (see armada_gem_linear_back),
 *  - linear: a node from the device's linear memory pool,
 *  - sgt:    an imported dma_buf (obj.import_attach is also set).
 */
struct armada_gem_object {
	struct drm_gem_object obj;
	void *addr;			/* kernel mapping, if any */
	phys_addr_t phys_addr;		/* CPU physical address */
	resource_size_t dev_addr;	/* device (DMA) address */
	struct drm_mm_node *linear;	/* for linear backed */
	struct page *page;		/* for page backed */
	struct sg_table *sgt;		/* for imported */
	void (*update)(void *);		/* called after a successful pwrite */
	void *update_data;		/* argument for ->update */
};
23
24extern const struct vm_operations_struct armada_gem_vm_ops;
25
26#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)
27
28void armada_gem_free_object(struct drm_gem_object *);
29int armada_gem_linear_back(struct drm_device *, struct armada_gem_object *);
30void *armada_gem_map_object(struct drm_device *, struct armada_gem_object *);
31struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
32 size_t);
33int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
34 struct drm_mode_create_dumb *);
35int armada_gem_dumb_map_offset(struct drm_file *, struct drm_device *,
36 uint32_t, uint64_t *);
37int armada_gem_dumb_destroy(struct drm_file *, struct drm_device *,
38 uint32_t);
39struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
40 struct drm_gem_object *obj, int flags);
41struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
42 struct dma_buf *);
43int armada_gem_map_import(struct armada_gem_object *);
44
45static inline struct armada_gem_object *armada_gem_object_lookup(
46 struct drm_device *dev, struct drm_file *dfile, unsigned handle)
47{
48 struct drm_gem_object *obj = drm_gem_object_lookup(dev, dfile, handle);
49
50 return obj ? drm_to_armada_gem(obj) : NULL;
51}
52#endif
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
new file mode 100644
index 000000000000..27319a8335e2
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -0,0 +1,318 @@
1/*
2 * Copyright (C) 2012 Russell King
3 * Rewritten from the dovefb driver, and Armada510 manuals.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#ifndef ARMADA_HW_H
10#define ARMADA_HW_H
11
12/*
13 * Note: the following registers are written from IRQ context:
14 * LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
15 * LCD_SPU_DMA_START_ADDR_[YUV][01], LCD_SPU_DMA_PITCH_YC,
16 * LCD_SPU_DMA_PITCH_UV, LCD_SPU_DMA_OVSA_HPXL_VLN,
17 * LCD_SPU_DMA_HPXL_VLN, LCD_SPU_DZM_HPXL_VLN, LCD_SPU_DMA_CTRL0
18 */
/* LCD controller register offsets from the register base.  Entries
 * marked "Armada 510" exist only on that SoC. */
enum {
	LCD_SPU_ADV_REG = 0x0084,	/* Armada 510 */
	LCD_SPU_DMA_START_ADDR_Y0 = 0x00c0,
	LCD_SPU_DMA_START_ADDR_U0 = 0x00c4,
	LCD_SPU_DMA_START_ADDR_V0 = 0x00c8,
	LCD_CFG_DMA_START_ADDR_0 = 0x00cc,
	LCD_SPU_DMA_START_ADDR_Y1 = 0x00d0,
	LCD_SPU_DMA_START_ADDR_U1 = 0x00d4,
	LCD_SPU_DMA_START_ADDR_V1 = 0x00d8,
	LCD_CFG_DMA_START_ADDR_1 = 0x00dc,
	LCD_SPU_DMA_PITCH_YC = 0x00e0,
	LCD_SPU_DMA_PITCH_UV = 0x00e4,
	LCD_SPU_DMA_OVSA_HPXL_VLN = 0x00e8,
	LCD_SPU_DMA_HPXL_VLN = 0x00ec,
	LCD_SPU_DZM_HPXL_VLN = 0x00f0,
	LCD_CFG_GRA_START_ADDR0 = 0x00f4,
	LCD_CFG_GRA_START_ADDR1 = 0x00f8,
	LCD_CFG_GRA_PITCH = 0x00fc,
	LCD_SPU_GRA_OVSA_HPXL_VLN = 0x0100,
	LCD_SPU_GRA_HPXL_VLN = 0x0104,
	LCD_SPU_GZM_HPXL_VLN = 0x0108,
	LCD_SPU_HWC_OVSA_HPXL_VLN = 0x010c,
	LCD_SPU_HWC_HPXL_VLN = 0x0110,
	LCD_SPUT_V_H_TOTAL = 0x0114,
	LCD_SPU_V_H_ACTIVE = 0x0118,
	LCD_SPU_H_PORCH = 0x011c,
	LCD_SPU_V_PORCH = 0x0120,
	LCD_SPU_BLANKCOLOR = 0x0124,
	LCD_SPU_ALPHA_COLOR1 = 0x0128,
	LCD_SPU_ALPHA_COLOR2 = 0x012c,
	LCD_SPU_COLORKEY_Y = 0x0130,
	LCD_SPU_COLORKEY_U = 0x0134,
	LCD_SPU_COLORKEY_V = 0x0138,
	LCD_CFG_RDREG4F = 0x013c,	/* Armada 510 */
	LCD_SPU_SPI_RXDATA = 0x0140,
	LCD_SPU_ISA_RXDATA = 0x0144,
	LCD_SPU_HWC_RDDAT = 0x0158,
	LCD_SPU_GAMMA_RDDAT = 0x015c,
	LCD_SPU_PALETTE_RDDAT = 0x0160,
	LCD_SPU_IOPAD_IN = 0x0178,
	LCD_CFG_RDREG5F = 0x017c,
	LCD_SPU_SPI_CTRL = 0x0180,
	LCD_SPU_SPI_TXDATA = 0x0184,
	LCD_SPU_SMPN_CTRL = 0x0188,
	LCD_SPU_DMA_CTRL0 = 0x0190,
	LCD_SPU_DMA_CTRL1 = 0x0194,
	LCD_SPU_SRAM_CTRL = 0x0198,
	LCD_SPU_SRAM_WRDAT = 0x019c,
	LCD_SPU_SRAM_PARA0 = 0x01a0,	/* Armada 510 */
	LCD_SPU_SRAM_PARA1 = 0x01a4,
	LCD_CFG_SCLK_DIV = 0x01a8,
	LCD_SPU_CONTRAST = 0x01ac,
	LCD_SPU_SATURATION = 0x01b0,
	LCD_SPU_CBSH_HUE = 0x01b4,
	LCD_SPU_DUMB_CTRL = 0x01b8,
	LCD_SPU_IOPAD_CONTROL = 0x01bc,
	LCD_SPU_IRQ_ENA = 0x01c0,
	LCD_SPU_IRQ_ISR = 0x01c4,
};
78
/* For LCD_SPU_ADV_REG */
enum {
	ADV_VSYNC_L_OFF = 0xfff << 20,
	ADV_GRACOLORKEY = 1 << 19,
	ADV_VIDCOLORKEY = 1 << 18,
	ADV_HWC32BLEND = 1 << 15,
	ADV_HWC32ARGB = 1 << 14,
	ADV_HWC32ENABLE = 1 << 13,
	ADV_VSYNCOFFEN = 1 << 12,
	ADV_VSYNC_H_OFF = 0xfff << 0,
};

/* Pixel formats and component-swap flags, used via the CFG_DMA_FMT()/
 * CFG_GRA_FMT() and CFG_DMA_MOD()/CFG_GRA_MOD() field macros below. */
enum {
	CFG_565 = 0,
	CFG_1555 = 1,
	CFG_888PACK = 2,
	CFG_X888 = 3,
	CFG_8888 = 4,
	CFG_422PACK = 5,
	CFG_422 = 6,
	CFG_420 = 7,
	CFG_PSEUDO4 = 9,
	CFG_PSEUDO8 = 10,
	CFG_SWAPRB = 1 << 4,
	CFG_SWAPUV = 1 << 3,
	CFG_SWAPYU = 1 << 2,
	CFG_YUV2RGB = 1 << 1,
};

/* For LCD_SPU_DMA_CTRL0 */
enum {
	CFG_NOBLENDING = 1 << 31,
	CFG_GAMMA_ENA = 1 << 30,
	CFG_CBSH_ENA = 1 << 29,
	CFG_PALETTE_ENA = 1 << 28,
	CFG_ARBFAST_ENA = 1 << 27,
	CFG_HWC_1BITMOD = 1 << 26,
	CFG_HWC_1BITENA = 1 << 25,
	CFG_HWC_ENA = 1 << 24,
	CFG_DMAFORMAT = 0xf << 20,
#define CFG_DMA_FMT(x) ((x) << 20)
	CFG_GRAFORMAT = 0xf << 16,
#define CFG_GRA_FMT(x) ((x) << 16)
#define CFG_GRA_MOD(x) ((x) << 8)
	CFG_GRA_FTOGGLE = 1 << 15,
	CFG_GRA_HSMOOTH = 1 << 14,
	CFG_GRA_TSTMODE = 1 << 13,
	CFG_GRA_ENA = 1 << 8,
#define CFG_DMA_MOD(x) ((x) << 0)
	CFG_DMA_FTOGGLE = 1 << 7,
	CFG_DMA_HSMOOTH = 1 << 6,
	CFG_DMA_TSTMODE = 1 << 5,
	CFG_DMA_ENA = 1 << 0,
};

/* Color key comparison modes, used via CFG_CKMODE() in LCD_SPU_DMA_CTRL1 */
enum {
	CKMODE_DISABLE = 0,
	CKMODE_Y = 1,
	CKMODE_U = 2,
	CKMODE_RGB = 3,
	CKMODE_V = 4,
	CKMODE_R = 5,
	CKMODE_G = 6,
	CKMODE_B = 7,
};
144
/* For LCD_SPU_DMA_CTRL1 */
enum {
	CFG_FRAME_TRIG = 1 << 31,
	CFG_VSYNC_INV = 1 << 27,
	CFG_CKMODE_MASK = 0x7 << 24,
#define CFG_CKMODE(x) ((x) << 24)
	CFG_CARRY = 1 << 23,
	CFG_GATED_CLK = 1 << 21,
	CFG_PWRDN_ENA = 1 << 20,
	CFG_DSCALE_MASK = 0x3 << 18,
	CFG_DSCALE_NONE = 0x0 << 18,
	CFG_DSCALE_HALF = 0x1 << 18,
	CFG_DSCALE_QUAR = 0x2 << 18,
	CFG_ALPHAM_MASK = 0x3 << 16,
	CFG_ALPHAM_VIDEO = 0x0 << 16,
	CFG_ALPHAM_GRA = 0x1 << 16,
	CFG_ALPHAM_CFG = 0x2 << 16,
	CFG_ALPHA_MASK = 0xff << 8,
	CFG_PIXCMD_MASK = 0xff,
};

/* For LCD_SPU_SRAM_CTRL */
enum {
	SRAM_READ = 0 << 14,
	SRAM_WRITE = 2 << 14,
	SRAM_INIT = 3 << 14,
	SRAM_HWC32_RAM1 = 0xc << 8,
	SRAM_HWC32_RAM2 = 0xd << 8,
	SRAM_HWC32_RAMR = SRAM_HWC32_RAM1,
	SRAM_HWC32_RAMG = SRAM_HWC32_RAM2,
	SRAM_HWC32_RAMB = 0xe << 8,
	SRAM_HWC32_TRAN = 0xf << 8,
	SRAM_HWC = 0xf << 8,
};

/* For LCD_SPU_SRAM_PARA1 */
enum {
	CFG_CSB_256x32 = 1 << 15,	/* cursor */
	CFG_CSB_256x24 = 1 << 14,	/* palette */
	CFG_CSB_256x8 = 1 << 13,	/* gamma */
	CFG_PDWN1920x32 = 1 << 8,	/* Armada 510: power down vscale ram */
	CFG_PDWN256x32 = 1 << 7,	/* power down cursor */
	CFG_PDWN256x24 = 1 << 6,	/* power down palette */
	CFG_PDWN256x8 = 1 << 5,		/* power down gamma */
	CFG_PDWNHWC = 1 << 4,		/* Armada 510: power down all hwc ram */
	CFG_PDWN32x32 = 1 << 3,		/* power down slave->smart ram */
	CFG_PDWN16x66 = 1 << 2,		/* power down UV fifo */
	CFG_PDWN32x66 = 1 << 1,		/* power down Y fifo */
	CFG_PDWN64x66 = 1 << 0,		/* power down graphic fifo */
};

/* For LCD_CFG_SCLK_DIV */
enum {
	/* Armada 510 */
	SCLK_510_AXI = 0x0 << 30,
	SCLK_510_EXTCLK0 = 0x1 << 30,
	SCLK_510_PLL = 0x2 << 30,
	SCLK_510_EXTCLK1 = 0x3 << 30,
	SCLK_510_DIV_CHANGE = 1 << 29,
	SCLK_510_FRAC_DIV_MASK = 0xfff << 16,
	SCLK_510_INT_DIV_MASK = 0xffff << 0,

	/* Armada 16x */
	SCLK_16X_AHB = 0x0 << 28,
	SCLK_16X_PCLK = 0x1 << 28,
	SCLK_16X_AXI = 0x4 << 28,
	SCLK_16X_PLL = 0x8 << 28,
	SCLK_16X_FRAC_DIV_MASK = 0xfff << 16,
	SCLK_16X_INT_DIV_MASK = 0xffff << 0,
};
215
/* For LCD_SPU_DUMB_CTRL */
enum {
	DUMB16_RGB565_0 = 0x0 << 28,
	DUMB16_RGB565_1 = 0x1 << 28,
	DUMB18_RGB666_0 = 0x2 << 28,
	DUMB18_RGB666_1 = 0x3 << 28,
	DUMB12_RGB444_0 = 0x4 << 28,
	DUMB12_RGB444_1 = 0x5 << 28,
	DUMB24_RGB888_0 = 0x6 << 28,
	DUMB_BLANK = 0x7 << 28,
	DUMB_MASK = 0xf << 28,
	CFG_BIAS_OUT = 1 << 8,
	CFG_REV_RGB = 1 << 7,
	CFG_INV_CBLANK = 1 << 6,
	CFG_INV_CSYNC = 1 << 5,		/* Normally active high */
	CFG_INV_HENA = 1 << 4,
	CFG_INV_VSYNC = 1 << 3,		/* Normally active high */
	CFG_INV_HSYNC = 1 << 2,		/* Normally active high */
	CFG_INV_PCLK = 1 << 1,
	CFG_DUMB_ENA = 1 << 0,
};

/* For LCD_SPU_IOPAD_CONTROL */
enum {
	CFG_VSCALE_LN_EN = 3 << 18,
	CFG_GRA_VM_ENA = 1 << 15,
	CFG_DMA_VM_ENA = 1 << 13,
	CFG_CMD_VM_ENA = 1 << 11,
	CFG_CSC_MASK = 3 << 8,
	CFG_CSC_YUV_CCIR709 = 1 << 9,
	CFG_CSC_YUV_CCIR601 = 0 << 9,
	CFG_CSC_RGB_STUDIO = 1 << 8,
	CFG_CSC_RGB_COMPUTER = 0 << 8,
	CFG_IOPAD_MASK = 0xf << 0,
	CFG_IOPAD_DUMB24 = 0x0 << 0,
	CFG_IOPAD_DUMB18SPI = 0x1 << 0,
	CFG_IOPAD_DUMB18GPIO = 0x2 << 0,
	CFG_IOPAD_DUMB16SPI = 0x3 << 0,
	CFG_IOPAD_DUMB16GPIO = 0x4 << 0,
	CFG_IOPAD_DUMB12GPIO = 0x5 << 0,
	CFG_IOPAD_SMART18 = 0x6 << 0,
	CFG_IOPAD_SMART16 = 0x7 << 0,
	CFG_IOPAD_SMART8 = 0x8 << 0,
};

/* Raw (unshifted) iopad value for 24-bit dumb panels */
#define IOPAD_DUMB24	0x0
262
/* For LCD_SPU_IRQ_ENA */
enum {
	DMA_FRAME_IRQ0_ENA = 1 << 31,
	DMA_FRAME_IRQ1_ENA = 1 << 30,
	DMA_FRAME_IRQ_ENA = DMA_FRAME_IRQ0_ENA | DMA_FRAME_IRQ1_ENA,
	DMA_FF_UNDERFLOW_ENA = 1 << 29,
	GRA_FRAME_IRQ0_ENA = 1 << 27,
	GRA_FRAME_IRQ1_ENA = 1 << 26,
	GRA_FRAME_IRQ_ENA = GRA_FRAME_IRQ0_ENA | GRA_FRAME_IRQ1_ENA,
	GRA_FF_UNDERFLOW_ENA = 1 << 25,
	VSYNC_IRQ_ENA = 1 << 23,
	DUMB_FRAMEDONE_ENA = 1 << 22,
	TWC_FRAMEDONE_ENA = 1 << 21,
	HWC_FRAMEDONE_ENA = 1 << 20,
	SLV_IRQ_ENA = 1 << 19,
	SPI_IRQ_ENA = 1 << 18,
	PWRDN_IRQ_ENA = 1 << 17,
	ERR_IRQ_ENA = 1 << 16,
	CLEAN_SPU_IRQ_ISR = 0xffff,
};

/* For LCD_SPU_IRQ_ISR: bits 31..16 mirror the enable bits above;
 * bits 15..0 are level/status indications. */
enum {
	DMA_FRAME_IRQ0 = 1 << 31,
	DMA_FRAME_IRQ1 = 1 << 30,
	DMA_FRAME_IRQ = DMA_FRAME_IRQ0 | DMA_FRAME_IRQ1,
	DMA_FF_UNDERFLOW = 1 << 29,
	GRA_FRAME_IRQ0 = 1 << 27,
	GRA_FRAME_IRQ1 = 1 << 26,
	GRA_FRAME_IRQ = GRA_FRAME_IRQ0 | GRA_FRAME_IRQ1,
	GRA_FF_UNDERFLOW = 1 << 25,
	VSYNC_IRQ = 1 << 23,
	DUMB_FRAMEDONE = 1 << 22,
	TWC_FRAMEDONE = 1 << 21,
	HWC_FRAMEDONE = 1 << 20,
	SLV_IRQ = 1 << 19,
	SPI_IRQ = 1 << 18,
	PWRDN_IRQ = 1 << 17,
	ERR_IRQ = 1 << 16,
	DMA_FRAME_IRQ0_LEVEL = 1 << 15,
	DMA_FRAME_IRQ1_LEVEL = 1 << 14,
	DMA_FRAME_CNT_ISR = 3 << 12,
	GRA_FRAME_IRQ0_LEVEL = 1 << 11,
	GRA_FRAME_IRQ1_LEVEL = 1 << 10,
	GRA_FRAME_CNT_ISR = 3 << 8,
	VSYNC_IRQ_LEVEL = 1 << 7,
	DUMB_FRAMEDONE_LEVEL = 1 << 6,
	TWC_FRAMEDONE_LEVEL = 1 << 5,
	HWC_FRAMEDONE_LEVEL = 1 << 4,
	SLV_FF_EMPTY = 1 << 3,
	DMA_FF_ALLEMPTY = 1 << 2,
	GRA_FF_ALLEMPTY = 1 << 1,
	PWRDN_IRQ_LEVEL = 1 << 0,
};
317
318#endif
diff --git a/drivers/gpu/drm/armada/armada_ioctlP.h b/drivers/gpu/drm/armada/armada_ioctlP.h
new file mode 100644
index 000000000000..bd8c4562066c
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_ioctlP.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
#ifndef ARMADA_IOCTLP_H
#define ARMADA_IOCTLP_H

/* Declare the handler prototype for a driver-private DRM ioctl */
#define ARMADA_IOCTL_PROTO(name)\
extern int armada_##name##_ioctl(struct drm_device *, void *, struct drm_file *)

ARMADA_IOCTL_PROTO(gem_create);
ARMADA_IOCTL_PROTO(gem_mmap);
ARMADA_IOCTL_PROTO(gem_pwrite);

#endif
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
new file mode 100644
index 000000000000..d685a5421485
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_output.c
@@ -0,0 +1,158 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <drm/drmP.h>
9#include <drm/drm_crtc_helper.h>
10#include <drm/drm_edid.h>
11#include <drm/drm_encoder_slave.h>
12#include "armada_output.h"
13#include "armada_drm.h"
14
/* A DRM connector bound to one of the driver's output types */
struct armada_connector {
	struct drm_connector conn;
	const struct armada_output_type *type;	/* per-output-type ops */
};

#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn)
21
22struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn)
23{
24 struct drm_encoder *enc = conn->encoder;
25
26 return enc ? enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]);
27}
28
29static enum drm_connector_status armada_drm_connector_detect(
30 struct drm_connector *conn, bool force)
31{
32 struct armada_connector *dconn = drm_to_armada_conn(conn);
33 enum drm_connector_status status = connector_status_disconnected;
34
35 if (dconn->type->detect) {
36 status = dconn->type->detect(conn, force);
37 } else {
38 struct drm_encoder *enc = armada_drm_connector_encoder(conn);
39
40 if (enc)
41 status = encoder_helper_funcs(enc)->detect(enc, conn);
42 }
43
44 return status;
45}
46
/* Tear down a connector: remove from sysfs, clean up the DRM core
 * state, then free the embedding armada_connector. */
static void armada_drm_connector_destroy(struct drm_connector *conn)
{
	struct armada_connector *dconn = drm_to_armada_conn(conn);

	drm_sysfs_connector_remove(conn);
	drm_connector_cleanup(conn);
	kfree(dconn);
}
55
56static int armada_drm_connector_set_property(struct drm_connector *conn,
57 struct drm_property *property, uint64_t value)
58{
59 struct armada_connector *dconn = drm_to_armada_conn(conn);
60
61 if (!dconn->type->set_property)
62 return -EINVAL;
63
64 return dconn->type->set_property(conn, property, value);
65}
66
/* Connector ops: detect/set_property dispatch to the output type */
static const struct drm_connector_funcs armada_drm_conn_funcs = {
	.dpms = drm_helper_connector_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.detect = armada_drm_connector_detect,
	.destroy = armada_drm_connector_destroy,
	.set_property = armada_drm_connector_set_property,
};
74
/* Turn the encoder off before a mode set */
void armada_drm_encoder_prepare(struct drm_encoder *encoder)
{
	encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_OFF);
}

/* Turn the encoder back on after a mode set */
void armada_drm_encoder_commit(struct drm_encoder *encoder)
{
	encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_ON);
}

/* No encoder-level mode adjustment is needed: accept the mode as-is */
bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
	const struct drm_display_mode *mode, struct drm_display_mode *adjusted)
{
	return true;
}
90
91/* Shouldn't this be a generic helper function? */
92int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
93 struct drm_display_mode *mode)
94{
95 struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
96 int valid = MODE_BAD;
97
98 if (encoder) {
99 struct drm_encoder_slave *slave = to_encoder_slave(encoder);
100
101 valid = slave->slave_funcs->mode_valid(encoder, mode);
102 }
103 return valid;
104}
105
106int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
107 struct drm_property *property, uint64_t value)
108{
109 struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
110 int rc = -EINVAL;
111
112 if (encoder) {
113 struct drm_encoder_slave *slave = to_encoder_slave(encoder);
114
115 rc = slave->slave_funcs->set_property(encoder, conn, property,
116 value);
117 }
118 return rc;
119}
120
/*
 * Create and register a connector of the given output type.  @data is
 * passed through to type->create(), which may attach an encoder to
 * the connector; the error path tears that encoder down again.
 * Returns 0 or a negative errno.
 */
int armada_output_create(struct drm_device *dev,
	const struct armada_output_type *type, const void *data)
{
	struct armada_connector *dconn;
	int ret;

	dconn = kzalloc(sizeof(*dconn), GFP_KERNEL);
	if (!dconn)
		return -ENOMEM;

	dconn->type = type;

	ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs,
				 type->connector_type);
	if (ret) {
		DRM_ERROR("unable to init connector\n");
		goto err_destroy_dconn;
	}

	ret = type->create(&dconn->conn, data);
	if (ret)
		goto err_conn;

	ret = drm_sysfs_connector_add(&dconn->conn);
	if (ret)
		goto err_sysfs;

	return 0;

 err_sysfs:
	/* destroy the encoder set up by type->create(), if any */
	if (dconn->conn.encoder)
		dconn->conn.encoder->funcs->destroy(dconn->conn.encoder);
 err_conn:
	drm_connector_cleanup(&dconn->conn);
 err_destroy_dconn:
	kfree(dconn);
	return ret;
}
diff --git a/drivers/gpu/drm/armada/armada_output.h b/drivers/gpu/drm/armada/armada_output.h
new file mode 100644
index 000000000000..4126d43b5057
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_output.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (C) 2012 Russell King
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef ARMADA_CONNETOR_H
9#define ARMADA_CONNETOR_H
10
11#define encoder_helper_funcs(encoder) \
12 ((struct drm_encoder_helper_funcs *)encoder->helper_private)
13
14struct armada_output_type {
15 int connector_type;
16 enum drm_connector_status (*detect)(struct drm_connector *, bool);
17 int (*create)(struct drm_connector *, const void *);
18 int (*set_property)(struct drm_connector *, struct drm_property *,
19 uint64_t);
20};
21
22struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn);
23
24void armada_drm_encoder_prepare(struct drm_encoder *encoder);
25void armada_drm_encoder_commit(struct drm_encoder *encoder);
26
27bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
28 const struct drm_display_mode *mode, struct drm_display_mode *adj);
29
30int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
31 struct drm_display_mode *mode);
32
33int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
34 struct drm_property *property, uint64_t value);
35
36int armada_output_create(struct drm_device *dev,
37 const struct armada_output_type *type, const void *data);
38
39#endif
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
new file mode 100644
index 000000000000..c5b06fdb459c
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -0,0 +1,477 @@
1/*
2 * Copyright (C) 2012 Russell King
3 * Rewritten from the dovefb driver, and Armada510 manuals.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <drm/drmP.h>
10#include "armada_crtc.h"
11#include "armada_drm.h"
12#include "armada_fb.h"
13#include "armada_gem.h"
14#include "armada_hw.h"
15#include <drm/armada_drm.h>
16#include "armada_ioctlP.h"
17
/* Colour-key and picture-adjustment state, programmed into the LCD
 * controller by armada_ovl_update_attr(). */
struct armada_plane_properties {
	uint32_t colorkey_yr;
	uint32_t colorkey_ug;
	uint32_t colorkey_vb;
/* Extract the R/G/B components from a packed property value
 * (R in bits 7:0, G in 15:8, B in 23:16). */
#define K2R(val) (((val) >> 0) & 0xff)
#define K2G(val) (((val) >> 8) & 0xff)
#define K2B(val) (((val) >> 16) & 0xff)
	int16_t brightness;	/* signed; written to LCD_SPU_CONTRAST */
	uint16_t contrast;
	uint16_t saturation;
	uint32_t colorkey_mode;	/* CFG_CKMODE() bits for LCD_SPU_DMA_CTRL1 */
};
30
/* Overlay plane state wrapping the base DRM plane. */
struct armada_plane {
	struct drm_plane base;
	spinlock_t lock;		/* protects old_fb */
	struct drm_framebuffer *old_fb;	/* fb to release after next vblank */
	uint32_t src_hw;		/* cached LCD_SPU_DMA_HPXL_VLN */
	uint32_t dst_hw;		/* cached LCD_SPU_DZM_HPXL_VLN */
	uint32_t dst_yx;		/* cached LCD_SPU_DMA_OVSA_HPXL_VLN */
	uint32_t ctrl0;			/* cached LCD_SPU_DMA_CTRL0 */
	struct {
		struct armada_vbl_event update;
		/* register writes applied from the vblank handler */
		struct armada_regs regs[13];
		/* armada_plane_update() waits here for update to leave
		 * its list before queueing a new set of writes */
		wait_queue_head_t wait;
	} vbl;
	struct armada_plane_properties prop;
};
#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
47
48
/*
 * Program the plane's colour-key and picture-adjustment properties into
 * the CRTC's LCD controller registers.
 */
static void
armada_ovl_update_attr(struct armada_plane_properties *prop,
	struct armada_crtc *dcrtc)
{
	writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
	writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
	writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);

	writel_relaxed(prop->brightness << 16 | prop->contrast,
		       dcrtc->base + LCD_SPU_CONTRAST);
	/* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
	writel_relaxed(prop->saturation << 16,
		       dcrtc->base + LCD_SPU_SATURATION);
	writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);

	/* CTRL1/ADV are read-modify-written under the CRTC's irq lock. */
	spin_lock_irq(&dcrtc->irq_lock);
	armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
		       CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
		       dcrtc->base + LCD_SPU_DMA_CTRL1);

	armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
	spin_unlock_irq(&dcrtc->irq_lock);
}
72
73/* === Plane support === */
74static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
75{
76 struct armada_plane *dplane = data;
77 struct drm_framebuffer *fb;
78
79 armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
80
81 spin_lock(&dplane->lock);
82 fb = dplane->old_fb;
83 dplane->old_fb = NULL;
84 spin_unlock(&dplane->lock);
85
86 if (fb)
87 armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
88}
89
/*
 * Clamp the span [start, start + size) to the visible range [0, max)
 * and return the number of visible units.  Returns 0 when the span is
 * entirely off-screen.
 *
 * Fix: a start at or beyond max previously computed max - start, which
 * underflows to a huge unsigned value; clamp that case to 0.
 */
static unsigned armada_limit(int start, unsigned size, unsigned max)
{
	int end = start + size;

	if (end < 0)
		return 0;
	if (start < 0)
		start = 0;
	if ((unsigned)start >= max)
		return 0;
	return (unsigned)end > max ? max - start : end - start;
}
99
/*
 * Set up or move the overlay plane on @crtc.
 *
 * crtc_x/crtc_y/crtc_w/crtc_h position the plane on the display in
 * whole pixels; the src_* values select the region of @fb to show and
 * are in 16.16 fixed point (shifted down by 16 before use).  Register
 * writes are queued in dplane->vbl.regs and applied from the vblank
 * handler, except for the fast position/size-only path which pokes the
 * registers directly.
 */
static int
armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
	struct drm_framebuffer *fb,
	int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
	uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
{
	struct armada_plane *dplane = drm_to_armada_plane(plane);
	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
	uint32_t val, ctrl0;
	unsigned idx = 0;
	int ret;

	/* Clip the destination rectangle against the active display. */
	crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
	crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
	ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
		CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
		CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;

	/* Does the position/size result in nothing to display? */
	if (crtc_w == 0 || crtc_h == 0) {
		ctrl0 &= ~CFG_DMA_ENA;
	}

	/*
	 * FIXME: if the starting point is off screen, we need to
	 * adjust src_x, src_y, src_w, src_h appropriately, and
	 * according to the scale.
	 */

	/* First use on this CRTC: claim it and program our attributes. */
	if (!dcrtc->plane) {
		dcrtc->plane = plane;
		armada_ovl_update_attr(&dplane->prop, dcrtc);
	}

	/* FIXME: overlay on an interlaced display */
	/* Just updating the position/size? */
	if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
		val = (src_h & 0xffff0000) | src_w >> 16;
		dplane->src_hw = val;
		writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
		val = crtc_h << 16 | crtc_w;
		dplane->dst_hw = val;
		writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
		val = crtc_y << 16 | crtc_x;
		dplane->dst_yx = val;
		writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
		return 0;
	} else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
		/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
		armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
			       dcrtc->base + LCD_SPU_SRAM_PARA1);
	}

	/* Wait for any previously queued vblank update to be consumed
	 * before reusing vbl.regs.
	 * NOTE(review): wait_event_timeout() returns 0 on timeout and a
	 * positive value otherwise — it never returns a negative value,
	 * so the check below never fires and a timeout is silently
	 * ignored; confirm this is intended. */
	ret = wait_event_timeout(dplane->vbl.wait,
				 list_empty(&dplane->vbl.update.node),
				 HZ/25);
	if (ret < 0)
		return ret;

	if (plane->fb != fb) {
		struct armada_gem_object *obj = drm_fb_obj(fb);
		uint32_t sy, su, sv;

		/*
		 * Take a reference on the new framebuffer - we want to
		 * hold on to it while the hardware is displaying it.
		 */
		drm_framebuffer_reference(fb);

		if (plane->fb) {
			struct drm_framebuffer *older_fb;

			/* Stash the outgoing fb for release at vblank;
			 * if one was already pending, release it now. */
			spin_lock_irq(&dplane->lock);
			older_fb = dplane->old_fb;
			dplane->old_fb = plane->fb;
			spin_unlock_irq(&dplane->lock);
			if (older_fb)
				armada_drm_queue_unref_work(dcrtc->crtc.dev,
							    older_fb);
		}

		/* Convert the 16.16 source origin into DMA start
		 * addresses for the Y, U and V planes. */
		src_y >>= 16;
		src_x >>= 16;
		sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
			src_x * fb->bits_per_pixel / 8;
		su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
			src_x;
		sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
			src_x;

		armada_reg_queue_set(dplane->vbl.regs, idx, sy,
				     LCD_SPU_DMA_START_ADDR_Y0);
		armada_reg_queue_set(dplane->vbl.regs, idx, su,
				     LCD_SPU_DMA_START_ADDR_U0);
		armada_reg_queue_set(dplane->vbl.regs, idx, sv,
				     LCD_SPU_DMA_START_ADDR_V0);
		armada_reg_queue_set(dplane->vbl.regs, idx, sy,
				     LCD_SPU_DMA_START_ADDR_Y1);
		armada_reg_queue_set(dplane->vbl.regs, idx, su,
				     LCD_SPU_DMA_START_ADDR_U1);
		armada_reg_queue_set(dplane->vbl.regs, idx, sv,
				     LCD_SPU_DMA_START_ADDR_V1);

		val = fb->pitches[0] << 16 | fb->pitches[0];
		armada_reg_queue_set(dplane->vbl.regs, idx, val,
				     LCD_SPU_DMA_PITCH_YC);
		val = fb->pitches[1] << 16 | fb->pitches[2];
		armada_reg_queue_set(dplane->vbl.regs, idx, val,
				     LCD_SPU_DMA_PITCH_UV);
	}

	/* Queue register writes only for values that actually changed. */
	val = (src_h & 0xffff0000) | src_w >> 16;
	if (dplane->src_hw != val) {
		dplane->src_hw = val;
		armada_reg_queue_set(dplane->vbl.regs, idx, val,
				     LCD_SPU_DMA_HPXL_VLN);
	}
	val = crtc_h << 16 | crtc_w;
	if (dplane->dst_hw != val) {
		dplane->dst_hw = val;
		armada_reg_queue_set(dplane->vbl.regs, idx, val,
				     LCD_SPU_DZM_HPXL_VLN);
	}
	val = crtc_y << 16 | crtc_x;
	if (dplane->dst_yx != val) {
		dplane->dst_yx = val;
		armada_reg_queue_set(dplane->vbl.regs, idx, val,
				     LCD_SPU_DMA_OVSA_HPXL_VLN);
	}
	if (dplane->ctrl0 != ctrl0) {
		dplane->ctrl0 = ctrl0;
		armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
			CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
			CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
			CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
			CFG_YUV2RGB) | CFG_DMA_ENA,
			LCD_SPU_DMA_CTRL0);
	}
	/* Hand any queued writes to the vblank handler. */
	if (idx) {
		armada_reg_queue_end(dplane->vbl.regs, idx);
		armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update);
	}
	return 0;
}
244
/*
 * Turn the overlay off: stop its DMA, power down its FIFOs and drop the
 * framebuffer references it holds.  Always returns 0.
 */
static int armada_plane_disable(struct drm_plane *plane)
{
	struct armada_plane *dplane = drm_to_armada_plane(plane);
	struct drm_framebuffer *fb;
	struct armada_crtc *dcrtc;

	/* Not bound to a CRTC: nothing to do. */
	if (!dplane->base.crtc)
		return 0;

	dcrtc = drm_to_armada_crtc(dplane->base.crtc);
	dcrtc->plane = NULL;

	/* Cancel any pending vblank update and disable the DMA channel. */
	spin_lock_irq(&dcrtc->irq_lock);
	armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update);
	armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
	dplane->ctrl0 = 0;
	spin_unlock_irq(&dcrtc->irq_lock);

	/* Power down the Y/U/V FIFOs */
	armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0,
		       dcrtc->base + LCD_SPU_SRAM_PARA1);

	/* Drop the reference held on the currently displayed fb ... */
	if (plane->fb)
		drm_framebuffer_unreference(plane->fb);

	/* ... and on any fb still queued for release at vblank. */
	spin_lock_irq(&dplane->lock);
	fb = dplane->old_fb;
	dplane->old_fb = NULL;
	spin_unlock_irq(&dplane->lock);
	if (fb)
		drm_framebuffer_unreference(fb);

	return 0;
}
279
/*
 * Plane destructor: detach the plane from the DRM core, then free the
 * containing armada_plane.
 *
 * Fix: the original freed the memory without calling drm_plane_cleanup(),
 * leaving the plane on the device's mode_config plane list.
 */
static void armada_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(drm_to_armada_plane(plane));
}
284
/*
 * Apply one of the overlay's custom DRM properties to the cached
 * armada_plane_properties, and reprogram the hardware if the plane is
 * currently bound to a CRTC.  Per the shifts below, each colorkey_*
 * register packs: bits 31:24 max, 23:16 min, 15:8 value, 7:0 alpha.
 * Unrecognised properties are silently ignored; always returns 0.
 */
static int armada_plane_set_property(struct drm_plane *plane,
	struct drm_property *property, uint64_t val)
{
	struct armada_private *priv = plane->dev->dev_private;
	struct armada_plane *dplane = drm_to_armada_plane(plane);
	bool update_attr = false;

	if (property == priv->colorkey_prop) {
		/* Single key: replicate each component into the min,
		 * max and value fields at once. */
#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
		dplane->prop.colorkey_yr = CCC(K2R(val));
		dplane->prop.colorkey_ug = CCC(K2G(val));
		dplane->prop.colorkey_vb = CCC(K2B(val));
#undef CCC
		update_attr = true;
	} else if (property == priv->colorkey_min_prop) {
		dplane->prop.colorkey_yr &= ~0x00ff0000;
		dplane->prop.colorkey_yr |= K2R(val) << 16;
		dplane->prop.colorkey_ug &= ~0x00ff0000;
		dplane->prop.colorkey_ug |= K2G(val) << 16;
		dplane->prop.colorkey_vb &= ~0x00ff0000;
		dplane->prop.colorkey_vb |= K2B(val) << 16;
		update_attr = true;
	} else if (property == priv->colorkey_max_prop) {
		dplane->prop.colorkey_yr &= ~0xff000000;
		dplane->prop.colorkey_yr |= K2R(val) << 24;
		dplane->prop.colorkey_ug &= ~0xff000000;
		dplane->prop.colorkey_ug |= K2G(val) << 24;
		dplane->prop.colorkey_vb &= ~0xff000000;
		dplane->prop.colorkey_vb |= K2B(val) << 24;
		update_attr = true;
	} else if (property == priv->colorkey_val_prop) {
		dplane->prop.colorkey_yr &= ~0x0000ff00;
		dplane->prop.colorkey_yr |= K2R(val) << 8;
		dplane->prop.colorkey_ug &= ~0x0000ff00;
		dplane->prop.colorkey_ug |= K2G(val) << 8;
		dplane->prop.colorkey_vb &= ~0x0000ff00;
		dplane->prop.colorkey_vb |= K2B(val) << 8;
		update_attr = true;
	} else if (property == priv->colorkey_alpha_prop) {
		dplane->prop.colorkey_yr &= ~0x000000ff;
		dplane->prop.colorkey_yr |= K2R(val);
		dplane->prop.colorkey_ug &= ~0x000000ff;
		dplane->prop.colorkey_ug |= K2G(val);
		dplane->prop.colorkey_vb &= ~0x000000ff;
		dplane->prop.colorkey_vb |= K2B(val);
		update_attr = true;
	} else if (property == priv->colorkey_mode_prop) {
		dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
		dplane->prop.colorkey_mode |= CFG_CKMODE(val);
		update_attr = true;
	} else if (property == priv->brightness_prop) {
		/* Property range is 0..511; stored value is signed. */
		dplane->prop.brightness = val - 256;
		update_attr = true;
	} else if (property == priv->contrast_prop) {
		dplane->prop.contrast = val;
		update_attr = true;
	} else if (property == priv->saturation_prop) {
		dplane->prop.saturation = val;
		update_attr = true;
	}

	if (update_attr && dplane->base.crtc)
		armada_ovl_update_attr(&dplane->prop,
			drm_to_armada_crtc(dplane->base.crtc));

	return 0;
}
352
/* DRM plane vtable for the overlay plane. */
static const struct drm_plane_funcs armada_plane_funcs = {
	.update_plane = armada_plane_update,
	.disable_plane = armada_plane_disable,
	.destroy = armada_plane_destroy,
	.set_property = armada_plane_set_property,
};
359
/* Pixel formats the overlay DMA engine accepts. */
static const uint32_t armada_formats[] = {
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YUV420,
	DRM_FORMAT_YVU420,
	DRM_FORMAT_YUV422,
	DRM_FORMAT_YVU422,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_BGR565,
};
380
381static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
382 { CKMODE_DISABLE, "disabled" },
383 { CKMODE_Y, "Y component" },
384 { CKMODE_U, "U component" },
385 { CKMODE_V, "V component" },
386 { CKMODE_RGB, "RGB" },
387 { CKMODE_R, "R component" },
388 { CKMODE_G, "G component" },
389 { CKMODE_B, "B component" },
390};
391
392static int armada_overlay_create_properties(struct drm_device *dev)
393{
394 struct armada_private *priv = dev->dev_private;
395
396 if (priv->colorkey_prop)
397 return 0;
398
399 priv->colorkey_prop = drm_property_create_range(dev, 0,
400 "colorkey", 0, 0xffffff);
401 priv->colorkey_min_prop = drm_property_create_range(dev, 0,
402 "colorkey_min", 0, 0xffffff);
403 priv->colorkey_max_prop = drm_property_create_range(dev, 0,
404 "colorkey_max", 0, 0xffffff);
405 priv->colorkey_val_prop = drm_property_create_range(dev, 0,
406 "colorkey_val", 0, 0xffffff);
407 priv->colorkey_alpha_prop = drm_property_create_range(dev, 0,
408 "colorkey_alpha", 0, 0xffffff);
409 priv->colorkey_mode_prop = drm_property_create_enum(dev, 0,
410 "colorkey_mode",
411 armada_drm_colorkey_enum_list,
412 ARRAY_SIZE(armada_drm_colorkey_enum_list));
413 priv->brightness_prop = drm_property_create_range(dev, 0,
414 "brightness", 0, 256 + 255);
415 priv->contrast_prop = drm_property_create_range(dev, 0,
416 "contrast", 0, 0x7fff);
417 priv->saturation_prop = drm_property_create_range(dev, 0,
418 "saturation", 0, 0x7fff);
419
420 if (!priv->colorkey_prop)
421 return -ENOMEM;
422
423 return 0;
424}
425
426int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
427{
428 struct armada_private *priv = dev->dev_private;
429 struct drm_mode_object *mobj;
430 struct armada_plane *dplane;
431 int ret;
432
433 ret = armada_overlay_create_properties(dev);
434 if (ret)
435 return ret;
436
437 dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
438 if (!dplane)
439 return -ENOMEM;
440
441 spin_lock_init(&dplane->lock);
442 init_waitqueue_head(&dplane->vbl.wait);
443 armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl,
444 dplane);
445
446 drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs,
447 armada_formats, ARRAY_SIZE(armada_formats), false);
448
449 dplane->prop.colorkey_yr = 0xfefefe00;
450 dplane->prop.colorkey_ug = 0x01010100;
451 dplane->prop.colorkey_vb = 0x01010100;
452 dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
453 dplane->prop.brightness = 0;
454 dplane->prop.contrast = 0x4000;
455 dplane->prop.saturation = 0x4000;
456
457 mobj = &dplane->base.base;
458 drm_object_attach_property(mobj, priv->colorkey_prop,
459 0x0101fe);
460 drm_object_attach_property(mobj, priv->colorkey_min_prop,
461 0x0101fe);
462 drm_object_attach_property(mobj, priv->colorkey_max_prop,
463 0x0101fe);
464 drm_object_attach_property(mobj, priv->colorkey_val_prop,
465 0x0101fe);
466 drm_object_attach_property(mobj, priv->colorkey_alpha_prop,
467 0x000000);
468 drm_object_attach_property(mobj, priv->colorkey_mode_prop,
469 CKMODE_RGB);
470 drm_object_attach_property(mobj, priv->brightness_prop, 256);
471 drm_object_attach_property(mobj, priv->contrast_prop,
472 dplane->prop.contrast);
473 drm_object_attach_property(mobj, priv->saturation_prop,
474 dplane->prop.saturation);
475
476 return 0;
477}
diff --git a/drivers/gpu/drm/armada/armada_slave.c b/drivers/gpu/drm/armada/armada_slave.c
new file mode 100644
index 000000000000..00d0facb42f3
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_slave.c
@@ -0,0 +1,139 @@
1/*
2 * Copyright (C) 2012 Russell King
3 * Rewritten from the dovefb driver, and Armada510 manuals.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <drm/drmP.h>
10#include <drm/drm_crtc_helper.h>
11#include <drm/drm_edid.h>
12#include <drm/drm_encoder_slave.h>
13#include "armada_drm.h"
14#include "armada_output.h"
15#include "armada_slave.h"
16
17static int armada_drm_slave_get_modes(struct drm_connector *conn)
18{
19 struct drm_encoder *enc = armada_drm_connector_encoder(conn);
20 int count = 0;
21
22 if (enc) {
23 struct drm_encoder_slave *slave = to_encoder_slave(enc);
24
25 count = slave->slave_funcs->get_modes(enc, conn);
26 }
27
28 return count;
29}
30
/*
 * Destroy a slave (i2c) encoder: let the slave driver clean up, release
 * the i2c adapter, then tear down and free the encoder itself.
 */
static void armada_drm_slave_destroy(struct drm_encoder *enc)
{
	struct drm_encoder_slave *slave = to_encoder_slave(enc);
	struct i2c_client *client = drm_i2c_encoder_get_client(enc);

	/* slave_funcs may still be NULL when called from the
	 * armada_drm_conn_slave_create() error paths. */
	if (slave->slave_funcs)
		slave->slave_funcs->destroy(enc);
	if (client)
		i2c_put_adapter(client->adapter);

	drm_encoder_cleanup(&slave->base);
	kfree(slave);
}
44
static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = {
	.destroy = armada_drm_slave_destroy,
};

/* Connector helpers: mode handling is delegated to the slave encoder. */
static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = {
	.get_modes = armada_drm_slave_get_modes,
	.mode_valid = armada_drm_slave_encoder_mode_valid,
	.best_encoder = armada_drm_connector_encoder,
};

/* Encoder helpers: forward directly to the drm_encoder_slave i2c glue. */
static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = {
	.dpms = drm_i2c_encoder_dpms,
	.save = drm_i2c_encoder_save,
	.restore = drm_i2c_encoder_restore,
	.mode_fixup = drm_i2c_encoder_mode_fixup,
	.prepare = drm_i2c_encoder_prepare,
	.commit = drm_i2c_encoder_commit,
	.mode_set = drm_i2c_encoder_mode_set,
	.detect = drm_i2c_encoder_detect,
};
65
/*
 * Bind a drm_encoder_slave (an i2c-attached encoder chip) to @conn.
 * @data is the armada_drm_slave_config passed through
 * armada_drm_connector_slave_create().  On failure the partially
 * constructed encoder is torn down again.
 */
static int
armada_drm_conn_slave_create(struct drm_connector *conn, const void *data)
{
	const struct armada_drm_slave_config *config = data;
	struct drm_encoder_slave *slave;
	struct i2c_adapter *adap;
	int ret;

	conn->interlace_allowed = config->interlace_allowed;
	conn->doublescan_allowed = config->doublescan_allowed;
	conn->polled = config->polled;

	drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs);

	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave)
		return -ENOMEM;

	slave->base.possible_crtcs = config->crtcs;

	/* The i2c bus may not be available yet - ask to be retried. */
	adap = i2c_get_adapter(config->i2c_adapter_id);
	if (!adap) {
		kfree(slave);
		return -EPROBE_DEFER;
	}

	ret = drm_encoder_init(conn->dev, &slave->base,
			       &armada_drm_slave_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS);
	if (ret) {
		DRM_ERROR("unable to init encoder\n");
		i2c_put_adapter(adap);
		kfree(slave);
		return ret;
	}

	ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info);
	/* Drop the reference taken above whether or not init succeeded. */
	i2c_put_adapter(adap);
	if (ret) {
		DRM_ERROR("unable to init encoder slave\n");
		armada_drm_slave_destroy(&slave->base);
		return ret;
	}

	drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers);

	ret = slave->slave_funcs->create_resources(&slave->base, conn);
	if (ret) {
		armada_drm_slave_destroy(&slave->base);
		return ret;
	}

	ret = drm_mode_connector_attach_encoder(conn, &slave->base);
	if (ret) {
		armada_drm_slave_destroy(&slave->base);
		return ret;
	}

	conn->encoder = &slave->base;

	return ret;
}
128
/* HDMI output driven through a drm_encoder_slave i2c encoder chip. */
static const struct armada_output_type armada_drm_conn_slave = {
	.connector_type = DRM_MODE_CONNECTOR_HDMIA,
	.create = armada_drm_conn_slave_create,
	.set_property = armada_drm_slave_encoder_set_property,
};

/* Public entry point: create an HDMI connector described by @config. */
int armada_drm_connector_slave_create(struct drm_device *dev,
	const struct armada_drm_slave_config *config)
{
	return armada_output_create(dev, &armada_drm_conn_slave, config);
}
diff --git a/drivers/gpu/drm/armada/armada_slave.h b/drivers/gpu/drm/armada/armada_slave.h
new file mode 100644
index 000000000000..bf2374c96fc1
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_slave.h
@@ -0,0 +1,26 @@
/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef ARMADA_SLAVE_H
#define ARMADA_SLAVE_H

#include <linux/i2c.h>
#include <drm/drmP.h>

/* Description of an i2c slave-encoder backed connector. */
struct armada_drm_slave_config {
	int i2c_adapter_id;		/* adapter the encoder chip sits on */
	uint32_t crtcs;			/* mask of CRTCs the encoder may use */
	uint8_t polled;			/* copied to connector->polled */
	bool interlace_allowed;
	bool doublescan_allowed;
	struct i2c_board_info info;	/* i2c device to instantiate */
};

int armada_drm_connector_slave_create(struct drm_device *dev,
	const struct armada_drm_slave_config *);

#endif
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 60685b21cc36..adabc3daaa5b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -494,13 +494,12 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
494 494
495int cirrus_vga_get_modes(struct drm_connector *connector) 495int cirrus_vga_get_modes(struct drm_connector *connector)
496{ 496{
497 /* Just add a static list of modes */ 497 int count;
498 drm_add_modes_noedid(connector, 640, 480);
499 drm_add_modes_noedid(connector, 800, 600);
500 drm_add_modes_noedid(connector, 1024, 768);
501 drm_add_modes_noedid(connector, 1280, 1024);
502 498
503 return 4; 499 /* Just add a static list of modes */
500 count = drm_add_modes_noedid(connector, 1280, 1024);
501 drm_set_preferred_mode(connector, 1024, 768);
502 return count;
504} 503}
505 504
506static int cirrus_vga_mode_valid(struct drm_connector *connector, 505static int cirrus_vga_mode_valid(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d7a8370e3cdc..d6cf77c472e7 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1303,7 +1303,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
1303} 1303}
1304 1304
1305/** 1305/**
1306 * drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode 1306 * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
1307 * @out: drm_display_mode to return to the user 1307 * @out: drm_display_mode to return to the user
1308 * @in: drm_mode_modeinfo to use 1308 * @in: drm_mode_modeinfo to use
1309 * 1309 *
@@ -1557,7 +1557,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
1557 obj = drm_mode_object_find(dev, crtc_resp->crtc_id, 1557 obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
1558 DRM_MODE_OBJECT_CRTC); 1558 DRM_MODE_OBJECT_CRTC);
1559 if (!obj) { 1559 if (!obj) {
1560 ret = -EINVAL; 1560 ret = -ENOENT;
1561 goto out; 1561 goto out;
1562 } 1562 }
1563 crtc = obj_to_crtc(obj); 1563 crtc = obj_to_crtc(obj);
@@ -1641,7 +1641,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1641 obj = drm_mode_object_find(dev, out_resp->connector_id, 1641 obj = drm_mode_object_find(dev, out_resp->connector_id,
1642 DRM_MODE_OBJECT_CONNECTOR); 1642 DRM_MODE_OBJECT_CONNECTOR);
1643 if (!obj) { 1643 if (!obj) {
1644 ret = -EINVAL; 1644 ret = -ENOENT;
1645 goto out; 1645 goto out;
1646 } 1646 }
1647 connector = obj_to_connector(obj); 1647 connector = obj_to_connector(obj);
@@ -1757,7 +1757,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
1757 obj = drm_mode_object_find(dev, enc_resp->encoder_id, 1757 obj = drm_mode_object_find(dev, enc_resp->encoder_id,
1758 DRM_MODE_OBJECT_ENCODER); 1758 DRM_MODE_OBJECT_ENCODER);
1759 if (!obj) { 1759 if (!obj) {
1760 ret = -EINVAL; 1760 ret = -ENOENT;
1761 goto out; 1761 goto out;
1762 } 1762 }
1763 encoder = obj_to_encoder(obj); 1763 encoder = obj_to_encoder(obj);
@@ -2141,7 +2141,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2141 DRM_MODE_OBJECT_CRTC); 2141 DRM_MODE_OBJECT_CRTC);
2142 if (!obj) { 2142 if (!obj) {
2143 DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id); 2143 DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
2144 ret = -EINVAL; 2144 ret = -ENOENT;
2145 goto out; 2145 goto out;
2146 } 2146 }
2147 crtc = obj_to_crtc(obj); 2147 crtc = obj_to_crtc(obj);
@@ -2164,7 +2164,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2164 if (!fb) { 2164 if (!fb) {
2165 DRM_DEBUG_KMS("Unknown FB ID%d\n", 2165 DRM_DEBUG_KMS("Unknown FB ID%d\n",
2166 crtc_req->fb_id); 2166 crtc_req->fb_id);
2167 ret = -EINVAL; 2167 ret = -ENOENT;
2168 goto out; 2168 goto out;
2169 } 2169 }
2170 } 2170 }
@@ -2232,7 +2232,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2232 if (!obj) { 2232 if (!obj) {
2233 DRM_DEBUG_KMS("Connector id %d unknown\n", 2233 DRM_DEBUG_KMS("Connector id %d unknown\n",
2234 out_id); 2234 out_id);
2235 ret = -EINVAL; 2235 ret = -ENOENT;
2236 goto out; 2236 goto out;
2237 } 2237 }
2238 connector = obj_to_connector(obj); 2238 connector = obj_to_connector(obj);
@@ -2280,7 +2280,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
2280 obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC); 2280 obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
2281 if (!obj) { 2281 if (!obj) {
2282 DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id); 2282 DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
2283 return -EINVAL; 2283 return -ENOENT;
2284 } 2284 }
2285 crtc = obj_to_crtc(obj); 2285 crtc = obj_to_crtc(obj);
2286 2286
@@ -2489,6 +2489,8 @@ static int format_check(const struct drm_mode_fb_cmd2 *r)
2489 case DRM_FORMAT_YVU444: 2489 case DRM_FORMAT_YVU444:
2490 return 0; 2490 return 0;
2491 default: 2491 default:
2492 DRM_DEBUG_KMS("invalid pixel format %s\n",
2493 drm_get_format_name(r->pixel_format));
2492 return -EINVAL; 2494 return -EINVAL;
2493 } 2495 }
2494} 2496}
@@ -2654,7 +2656,7 @@ fail_lookup:
2654 mutex_unlock(&dev->mode_config.fb_lock); 2656 mutex_unlock(&dev->mode_config.fb_lock);
2655 mutex_unlock(&file_priv->fbs_lock); 2657 mutex_unlock(&file_priv->fbs_lock);
2656 2658
2657 return -EINVAL; 2659 return -ENOENT;
2658} 2660}
2659 2661
2660/** 2662/**
@@ -2682,7 +2684,7 @@ int drm_mode_getfb(struct drm_device *dev,
2682 2684
2683 fb = drm_framebuffer_lookup(dev, r->fb_id); 2685 fb = drm_framebuffer_lookup(dev, r->fb_id);
2684 if (!fb) 2686 if (!fb)
2685 return -EINVAL; 2687 return -ENOENT;
2686 2688
2687 r->height = fb->height; 2689 r->height = fb->height;
2688 r->width = fb->width; 2690 r->width = fb->width;
@@ -2727,7 +2729,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
2727 2729
2728 fb = drm_framebuffer_lookup(dev, r->fb_id); 2730 fb = drm_framebuffer_lookup(dev, r->fb_id);
2729 if (!fb) 2731 if (!fb)
2730 return -EINVAL; 2732 return -ENOENT;
2731 2733
2732 num_clips = r->num_clips; 2734 num_clips = r->num_clips;
2733 clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr; 2735 clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
@@ -3059,7 +3061,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
3059 drm_modeset_lock_all(dev); 3061 drm_modeset_lock_all(dev);
3060 obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); 3062 obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
3061 if (!obj) { 3063 if (!obj) {
3062 ret = -EINVAL; 3064 ret = -ENOENT;
3063 goto done; 3065 goto done;
3064 } 3066 }
3065 property = obj_to_property(obj); 3067 property = obj_to_property(obj);
@@ -3188,7 +3190,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
3188 drm_modeset_lock_all(dev); 3190 drm_modeset_lock_all(dev);
3189 obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); 3191 obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
3190 if (!obj) { 3192 if (!obj) {
3191 ret = -EINVAL; 3193 ret = -ENOENT;
3192 goto done; 3194 goto done;
3193 } 3195 }
3194 blob = obj_to_blob(obj); 3196 blob = obj_to_blob(obj);
@@ -3349,7 +3351,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
3349 3351
3350 obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type); 3352 obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
3351 if (!obj) { 3353 if (!obj) {
3352 ret = -EINVAL; 3354 ret = -ENOENT;
3353 goto out; 3355 goto out;
3354 } 3356 }
3355 if (!obj->properties) { 3357 if (!obj->properties) {
@@ -3402,8 +3404,10 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
3402 drm_modeset_lock_all(dev); 3404 drm_modeset_lock_all(dev);
3403 3405
3404 arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type); 3406 arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
3405 if (!arg_obj) 3407 if (!arg_obj) {
3408 ret = -ENOENT;
3406 goto out; 3409 goto out;
3410 }
3407 if (!arg_obj->properties) 3411 if (!arg_obj->properties)
3408 goto out; 3412 goto out;
3409 3413
@@ -3416,8 +3420,10 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
3416 3420
3417 prop_obj = drm_mode_object_find(dev, arg->prop_id, 3421 prop_obj = drm_mode_object_find(dev, arg->prop_id,
3418 DRM_MODE_OBJECT_PROPERTY); 3422 DRM_MODE_OBJECT_PROPERTY);
3419 if (!prop_obj) 3423 if (!prop_obj) {
3424 ret = -ENOENT;
3420 goto out; 3425 goto out;
3426 }
3421 property = obj_to_property(prop_obj); 3427 property = obj_to_property(prop_obj);
3422 3428
3423 if (!drm_property_change_is_valid(property, arg->value)) 3429 if (!drm_property_change_is_valid(property, arg->value))
@@ -3502,7 +3508,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
3502 drm_modeset_lock_all(dev); 3508 drm_modeset_lock_all(dev);
3503 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); 3509 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
3504 if (!obj) { 3510 if (!obj) {
3505 ret = -EINVAL; 3511 ret = -ENOENT;
3506 goto out; 3512 goto out;
3507 } 3513 }
3508 crtc = obj_to_crtc(obj); 3514 crtc = obj_to_crtc(obj);
@@ -3561,7 +3567,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
3561 drm_modeset_lock_all(dev); 3567 drm_modeset_lock_all(dev);
3562 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); 3568 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
3563 if (!obj) { 3569 if (!obj) {
3564 ret = -EINVAL; 3570 ret = -ENOENT;
3565 goto out; 3571 goto out;
3566 } 3572 }
3567 crtc = obj_to_crtc(obj); 3573 crtc = obj_to_crtc(obj);
@@ -3615,7 +3621,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3615 3621
3616 obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC); 3622 obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
3617 if (!obj) 3623 if (!obj)
3618 return -EINVAL; 3624 return -ENOENT;
3619 crtc = obj_to_crtc(obj); 3625 crtc = obj_to_crtc(obj);
3620 3626
3621 mutex_lock(&crtc->mutex); 3627 mutex_lock(&crtc->mutex);
@@ -3632,8 +3638,10 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3632 goto out; 3638 goto out;
3633 3639
3634 fb = drm_framebuffer_lookup(dev, page_flip->fb_id); 3640 fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
3635 if (!fb) 3641 if (!fb) {
3642 ret = -ENOENT;
3636 goto out; 3643 goto out;
3644 }
3637 3645
3638 ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb); 3646 ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
3639 if (ret) 3647 if (ret)
@@ -3822,7 +3830,8 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
3822 *bpp = 32; 3830 *bpp = 32;
3823 break; 3831 break;
3824 default: 3832 default:
3825 DRM_DEBUG_KMS("unsupported pixel format\n"); 3833 DRM_DEBUG_KMS("unsupported pixel format %s\n",
3834 drm_get_format_name(format));
3826 *depth = 0; 3835 *depth = 0;
3827 *bpp = 0; 3836 *bpp = 0;
3828 break; 3837 break;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 0d6469d74be4..01361aba033b 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -405,22 +405,25 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
405 struct drm_framebuffer *old_fb) 405 struct drm_framebuffer *old_fb)
406{ 406{
407 struct drm_device *dev = crtc->dev; 407 struct drm_device *dev = crtc->dev;
408 struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; 408 struct drm_display_mode *adjusted_mode, saved_mode;
409 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 409 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
410 struct drm_encoder_helper_funcs *encoder_funcs; 410 struct drm_encoder_helper_funcs *encoder_funcs;
411 int saved_x, saved_y; 411 int saved_x, saved_y;
412 bool saved_enabled;
412 struct drm_encoder *encoder; 413 struct drm_encoder *encoder;
413 bool ret = true; 414 bool ret = true;
414 415
416 saved_enabled = crtc->enabled;
415 crtc->enabled = drm_helper_crtc_in_use(crtc); 417 crtc->enabled = drm_helper_crtc_in_use(crtc);
416 if (!crtc->enabled) 418 if (!crtc->enabled)
417 return true; 419 return true;
418 420
419 adjusted_mode = drm_mode_duplicate(dev, mode); 421 adjusted_mode = drm_mode_duplicate(dev, mode);
420 if (!adjusted_mode) 422 if (!adjusted_mode) {
423 crtc->enabled = saved_enabled;
421 return false; 424 return false;
425 }
422 426
423 saved_hwmode = crtc->hwmode;
424 saved_mode = crtc->mode; 427 saved_mode = crtc->mode;
425 saved_x = crtc->x; 428 saved_x = crtc->x;
426 saved_y = crtc->y; 429 saved_y = crtc->y;
@@ -539,7 +542,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
539done: 542done:
540 drm_mode_destroy(dev, adjusted_mode); 543 drm_mode_destroy(dev, adjusted_mode);
541 if (!ret) { 544 if (!ret) {
542 crtc->hwmode = saved_hwmode; 545 crtc->enabled = saved_enabled;
543 crtc->mode = saved_mode; 546 crtc->mode = saved_mode;
544 crtc->x = saved_x; 547 crtc->x = saved_x;
545 crtc->y = saved_y; 548 crtc->y = saved_y;
@@ -567,6 +570,14 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
567 continue; 570 continue;
568 571
569 connector->encoder = NULL; 572 connector->encoder = NULL;
573
574 /*
575 * drm_helper_disable_unused_functions() ought to be
576 * doing this, but since we've decoupled the encoder
577 * from the connector above, the required connection
578 * between them is henceforth no longer available.
579 */
580 connector->dpms = DRM_MODE_DPMS_OFF;
570 } 581 }
571 } 582 }
572 583
@@ -593,9 +604,8 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
593int drm_crtc_helper_set_config(struct drm_mode_set *set) 604int drm_crtc_helper_set_config(struct drm_mode_set *set)
594{ 605{
595 struct drm_device *dev; 606 struct drm_device *dev;
596 struct drm_crtc *save_crtcs, *new_crtc, *crtc; 607 struct drm_crtc *new_crtc;
597 struct drm_encoder *save_encoders, *new_encoder, *encoder; 608 struct drm_encoder *save_encoders, *new_encoder, *encoder;
598 struct drm_framebuffer *old_fb = NULL;
599 bool mode_changed = false; /* if true do a full mode set */ 609 bool mode_changed = false; /* if true do a full mode set */
600 bool fb_changed = false; /* if true and !mode_changed just do a flip */ 610 bool fb_changed = false; /* if true and !mode_changed just do a flip */
601 struct drm_connector *save_connectors, *connector; 611 struct drm_connector *save_connectors, *connector;
@@ -631,38 +641,28 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
631 641
632 dev = set->crtc->dev; 642 dev = set->crtc->dev;
633 643
634 /* Allocate space for the backup of all (non-pointer) crtc, encoder and 644 /*
635 * connector data. */ 645 * Allocate space for the backup of all (non-pointer) encoder and
636 save_crtcs = kzalloc(dev->mode_config.num_crtc * 646 * connector data.
637 sizeof(struct drm_crtc), GFP_KERNEL); 647 */
638 if (!save_crtcs)
639 return -ENOMEM;
640
641 save_encoders = kzalloc(dev->mode_config.num_encoder * 648 save_encoders = kzalloc(dev->mode_config.num_encoder *
642 sizeof(struct drm_encoder), GFP_KERNEL); 649 sizeof(struct drm_encoder), GFP_KERNEL);
643 if (!save_encoders) { 650 if (!save_encoders)
644 kfree(save_crtcs);
645 return -ENOMEM; 651 return -ENOMEM;
646 }
647 652
648 save_connectors = kzalloc(dev->mode_config.num_connector * 653 save_connectors = kzalloc(dev->mode_config.num_connector *
649 sizeof(struct drm_connector), GFP_KERNEL); 654 sizeof(struct drm_connector), GFP_KERNEL);
650 if (!save_connectors) { 655 if (!save_connectors) {
651 kfree(save_crtcs);
652 kfree(save_encoders); 656 kfree(save_encoders);
653 return -ENOMEM; 657 return -ENOMEM;
654 } 658 }
655 659
656 /* Copy data. Note that driver private data is not affected. 660 /*
661 * Copy data. Note that driver private data is not affected.
657 * Should anything bad happen only the expected state is 662 * Should anything bad happen only the expected state is
658 * restored, not the drivers personal bookkeeping. 663 * restored, not the drivers personal bookkeeping.
659 */ 664 */
660 count = 0; 665 count = 0;
661 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
662 save_crtcs[count++] = *crtc;
663 }
664
665 count = 0;
666 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 666 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
667 save_encoders[count++] = *encoder; 667 save_encoders[count++] = *encoder;
668 } 668 }
@@ -785,19 +785,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
785 mode_changed = true; 785 mode_changed = true;
786 786
787 if (mode_changed) { 787 if (mode_changed) {
788 set->crtc->enabled = drm_helper_crtc_in_use(set->crtc); 788 if (drm_helper_crtc_in_use(set->crtc)) {
789 if (set->crtc->enabled) {
790 DRM_DEBUG_KMS("attempting to set mode from" 789 DRM_DEBUG_KMS("attempting to set mode from"
791 " userspace\n"); 790 " userspace\n");
792 drm_mode_debug_printmodeline(set->mode); 791 drm_mode_debug_printmodeline(set->mode);
793 old_fb = set->crtc->fb;
794 set->crtc->fb = set->fb; 792 set->crtc->fb = set->fb;
795 if (!drm_crtc_helper_set_mode(set->crtc, set->mode, 793 if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
796 set->x, set->y, 794 set->x, set->y,
797 old_fb)) { 795 save_set.fb)) {
798 DRM_ERROR("failed to set mode on [CRTC:%d]\n", 796 DRM_ERROR("failed to set mode on [CRTC:%d]\n",
799 set->crtc->base.id); 797 set->crtc->base.id);
800 set->crtc->fb = old_fb; 798 set->crtc->fb = save_set.fb;
801 ret = -EINVAL; 799 ret = -EINVAL;
802 goto fail; 800 goto fail;
803 } 801 }
@@ -812,31 +810,24 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
812 } else if (fb_changed) { 810 } else if (fb_changed) {
813 set->crtc->x = set->x; 811 set->crtc->x = set->x;
814 set->crtc->y = set->y; 812 set->crtc->y = set->y;
815 813 set->crtc->fb = set->fb;
816 old_fb = set->crtc->fb;
817 if (set->crtc->fb != set->fb)
818 set->crtc->fb = set->fb;
819 ret = crtc_funcs->mode_set_base(set->crtc, 814 ret = crtc_funcs->mode_set_base(set->crtc,
820 set->x, set->y, old_fb); 815 set->x, set->y, save_set.fb);
821 if (ret != 0) { 816 if (ret != 0) {
822 set->crtc->fb = old_fb; 817 set->crtc->x = save_set.x;
818 set->crtc->y = save_set.y;
819 set->crtc->fb = save_set.fb;
823 goto fail; 820 goto fail;
824 } 821 }
825 } 822 }
826 823
827 kfree(save_connectors); 824 kfree(save_connectors);
828 kfree(save_encoders); 825 kfree(save_encoders);
829 kfree(save_crtcs);
830 return 0; 826 return 0;
831 827
832fail: 828fail:
833 /* Restore all previous data. */ 829 /* Restore all previous data. */
834 count = 0; 830 count = 0;
835 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
836 *crtc = save_crtcs[count++];
837 }
838
839 count = 0;
840 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 831 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
841 *encoder = save_encoders[count++]; 832 *encoder = save_encoders[count++];
842 } 833 }
@@ -854,7 +845,6 @@ fail:
854 845
855 kfree(save_connectors); 846 kfree(save_connectors);
856 kfree(save_encoders); 847 kfree(save_encoders);
857 kfree(save_crtcs);
858 return ret; 848 return ret;
859} 849}
860EXPORT_SYMBOL(drm_crtc_helper_set_config); 850EXPORT_SYMBOL(drm_crtc_helper_set_config);
@@ -1135,14 +1125,14 @@ void drm_kms_helper_poll_fini(struct drm_device *dev)
1135} 1125}
1136EXPORT_SYMBOL(drm_kms_helper_poll_fini); 1126EXPORT_SYMBOL(drm_kms_helper_poll_fini);
1137 1127
1138void drm_helper_hpd_irq_event(struct drm_device *dev) 1128bool drm_helper_hpd_irq_event(struct drm_device *dev)
1139{ 1129{
1140 struct drm_connector *connector; 1130 struct drm_connector *connector;
1141 enum drm_connector_status old_status; 1131 enum drm_connector_status old_status;
1142 bool changed = false; 1132 bool changed = false;
1143 1133
1144 if (!dev->mode_config.poll_enabled) 1134 if (!dev->mode_config.poll_enabled)
1145 return; 1135 return false;
1146 1136
1147 mutex_lock(&dev->mode_config.mutex); 1137 mutex_lock(&dev->mode_config.mutex);
1148 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1138 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -1167,5 +1157,7 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
1167 1157
1168 if (changed) 1158 if (changed)
1169 drm_kms_helper_hotplug_event(dev); 1159 drm_kms_helper_hotplug_event(dev);
1160
1161 return changed;
1170} 1162}
1171EXPORT_SYMBOL(drm_helper_hpd_irq_event); 1163EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index a05087cf846d..b4b51d46f339 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -42,7 +42,7 @@
42 * Initialization, etc. 42 * Initialization, etc.
43 **************************************************/ 43 **************************************************/
44 44
45static struct drm_info_list drm_debugfs_list[] = { 45static const struct drm_info_list drm_debugfs_list[] = {
46 {"name", drm_name_info, 0}, 46 {"name", drm_name_info, 0},
47 {"vm", drm_vm_info, 0}, 47 {"vm", drm_vm_info, 0},
48 {"clients", drm_clients_info, 0}, 48 {"clients", drm_clients_info, 0},
@@ -84,7 +84,7 @@ static const struct file_operations drm_debugfs_fops = {
84 * Create a given set of debugfs files represented by an array of 84 * Create a given set of debugfs files represented by an array of
85 * gdm_debugfs_lists in the given root directory. 85 * gdm_debugfs_lists in the given root directory.
86 */ 86 */
87int drm_debugfs_create_files(struct drm_info_list *files, int count, 87int drm_debugfs_create_files(const struct drm_info_list *files, int count,
88 struct dentry *root, struct drm_minor *minor) 88 struct dentry *root, struct drm_minor *minor)
89{ 89{
90 struct drm_device *dev = minor->dev; 90 struct drm_device *dev = minor->dev;
@@ -188,7 +188,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
188 * 188 *
189 * Remove all debugfs entries created by debugfs_init(). 189 * Remove all debugfs entries created by debugfs_init().
190 */ 190 */
191int drm_debugfs_remove_files(struct drm_info_list *files, int count, 191int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
192 struct drm_minor *minor) 192 struct drm_minor *minor)
193{ 193{
194 struct list_head *pos, *q; 194 struct list_head *pos, *q;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 05e197d32c45..d9137e49c4e8 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -403,7 +403,7 @@ long drm_ioctl(struct file *filp,
403 403
404 err_i1: 404 err_i1:
405 if (!ioctl) 405 if (!ioctl)
406 DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n", 406 DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
407 task_pid_nr(current), 407 task_pid_nr(current),
408 (long)old_encode_dev(file_priv->minor->device), 408 (long)old_encode_dev(file_priv->minor->device),
409 file_priv->authenticated, cmd, nr); 409 file_priv->authenticated, cmd, nr);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f1764ec5818b..fb7cf0e796f6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -458,6 +458,15 @@ static const struct drm_display_mode drm_dmt_modes[] = {
458 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, 458 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
459}; 459};
460 460
461/*
462 * These more or less come from the DMT spec. The 720x400 modes are
463 * inferred from historical 80x25 practice. The 640x480@67 and 832x624@75
464 * modes are old-school Mac modes. The EDID spec says the 1152x864@75 mode
465 * should be 1152x870, again for the Mac, but instead we use the x864 DMT
466 * mode.
467 *
468 * The DMT modes have been fact-checked; the rest are mild guesses.
469 */
461static const struct drm_display_mode edid_est_modes[] = { 470static const struct drm_display_mode edid_est_modes[] = {
462 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, 471 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
463 968, 1056, 0, 600, 601, 605, 628, 0, 472 968, 1056, 0, 600, 601, 605, 628, 0,
@@ -560,7 +569,7 @@ static const struct minimode est3_modes[] = {
560 { 1600, 1200, 75, 0 }, 569 { 1600, 1200, 75, 0 },
561 { 1600, 1200, 85, 0 }, 570 { 1600, 1200, 85, 0 },
562 { 1792, 1344, 60, 0 }, 571 { 1792, 1344, 60, 0 },
563 { 1792, 1344, 85, 0 }, 572 { 1792, 1344, 75, 0 },
564 { 1856, 1392, 60, 0 }, 573 { 1856, 1392, 60, 0 },
565 { 1856, 1392, 75, 0 }, 574 { 1856, 1392, 75, 0 },
566 { 1920, 1200, 60, 1 }, 575 { 1920, 1200, 60, 1 },
@@ -1320,7 +1329,7 @@ static u32 edid_get_quirks(struct edid *edid)
1320} 1329}
1321 1330
1322#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay) 1331#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
1323#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh)) 1332#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
1324 1333
1325/** 1334/**
1326 * edid_fixup_preferred - set preferred modes based on quirk list 1335 * edid_fixup_preferred - set preferred modes based on quirk list
@@ -1335,6 +1344,7 @@ static void edid_fixup_preferred(struct drm_connector *connector,
1335{ 1344{
1336 struct drm_display_mode *t, *cur_mode, *preferred_mode; 1345 struct drm_display_mode *t, *cur_mode, *preferred_mode;
1337 int target_refresh = 0; 1346 int target_refresh = 0;
1347 int cur_vrefresh, preferred_vrefresh;
1338 1348
1339 if (list_empty(&connector->probed_modes)) 1349 if (list_empty(&connector->probed_modes))
1340 return; 1350 return;
@@ -1357,10 +1367,14 @@ static void edid_fixup_preferred(struct drm_connector *connector,
1357 if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode)) 1367 if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
1358 preferred_mode = cur_mode; 1368 preferred_mode = cur_mode;
1359 1369
1370 cur_vrefresh = cur_mode->vrefresh ?
1371 cur_mode->vrefresh : drm_mode_vrefresh(cur_mode);
1372 preferred_vrefresh = preferred_mode->vrefresh ?
1373 preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode);
1360 /* At a given size, try to get closest to target refresh */ 1374 /* At a given size, try to get closest to target refresh */
1361 if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) && 1375 if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
1362 MODE_REFRESH_DIFF(cur_mode, target_refresh) < 1376 MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) <
1363 MODE_REFRESH_DIFF(preferred_mode, target_refresh)) { 1377 MODE_REFRESH_DIFF(preferred_vrefresh, target_refresh)) {
1364 preferred_mode = cur_mode; 1378 preferred_mode = cur_mode;
1365 } 1379 }
1366 } 1380 }
@@ -2080,7 +2094,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
2080 u8 *est = ((u8 *)timing) + 5; 2094 u8 *est = ((u8 *)timing) + 5;
2081 2095
2082 for (i = 0; i < 6; i++) { 2096 for (i = 0; i < 6; i++) {
2083 for (j = 7; j > 0; j--) { 2097 for (j = 7; j >= 0; j--) {
2084 m = (i * 8) + (7 - j); 2098 m = (i * 8) + (7 - j);
2085 if (m >= ARRAY_SIZE(est3_modes)) 2099 if (m >= ARRAY_SIZE(est3_modes))
2086 break; 2100 break;
@@ -3473,6 +3487,19 @@ int drm_add_modes_noedid(struct drm_connector *connector,
3473} 3487}
3474EXPORT_SYMBOL(drm_add_modes_noedid); 3488EXPORT_SYMBOL(drm_add_modes_noedid);
3475 3489
3490void drm_set_preferred_mode(struct drm_connector *connector,
3491 int hpref, int vpref)
3492{
3493 struct drm_display_mode *mode;
3494
3495 list_for_each_entry(mode, &connector->probed_modes, head) {
3496 if (drm_mode_width(mode) == hpref &&
3497 drm_mode_height(mode) == vpref)
3498 mode->type |= DRM_MODE_TYPE_PREFERRED;
3499 }
3500}
3501EXPORT_SYMBOL(drm_set_preferred_mode);
3502
3476/** 3503/**
3477 * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with 3504 * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
3478 * data from a DRM display mode 3505 * data from a DRM display mode
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index d0e27667a4eb..3a7176ce2540 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -239,7 +239,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
239 239
240 priv->ioctl_count = 0; 240 priv->ioctl_count = 0;
241 /* for compatibility root is always authenticated */ 241 /* for compatibility root is always authenticated */
242 priv->authenticated = capable(CAP_SYS_ADMIN); 242 priv->always_authenticated = capable(CAP_SYS_ADMIN);
243 priv->authenticated = priv->always_authenticated;
243 priv->lock_count = 0; 244 priv->lock_count = 0;
244 245
245 INIT_LIST_HEAD(&priv->lhead); 246 INIT_LIST_HEAD(&priv->lhead);
@@ -378,8 +379,10 @@ static void drm_events_release(struct drm_file *file_priv)
378 } 379 }
379 380
380 /* Remove unconsumed events */ 381 /* Remove unconsumed events */
381 list_for_each_entry_safe(e, et, &file_priv->event_list, link) 382 list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
383 list_del(&e->link);
382 e->destroy(e); 384 e->destroy(e);
385 }
383 386
384 spin_unlock_irqrestore(&dev->event_lock, flags); 387 spin_unlock_irqrestore(&dev->event_lock, flags);
385} 388}
@@ -531,7 +534,7 @@ int drm_release(struct inode *inode, struct file *filp)
531 list_for_each_entry(temp, &dev->filelist, lhead) { 534 list_for_each_entry(temp, &dev->filelist, lhead) {
532 if ((temp->master == file_priv->master) && 535 if ((temp->master == file_priv->master) &&
533 (temp != file_priv)) 536 (temp != file_priv))
534 temp->authenticated = 0; 537 temp->authenticated = temp->always_authenticated;
535 } 538 }
536 539
537 /** 540 /**
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f9af048828ea..d80d95289e10 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -219,7 +219,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
219 for (i = 0; i < num_crtcs; i++) 219 for (i = 0; i < num_crtcs; i++)
220 init_waitqueue_head(&dev->vblank[i].queue); 220 init_waitqueue_head(&dev->vblank[i].queue);
221 221
222 DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n"); 222 DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
223 223
224 /* Driver specific high-precision vblank timestamping supported? */ 224 /* Driver specific high-precision vblank timestamping supported? */
225 if (dev->driver->get_vblank_timestamp) 225 if (dev->driver->get_vblank_timestamp)
@@ -586,24 +586,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
586 * code gets preempted or delayed for some reason. 586 * code gets preempted or delayed for some reason.
587 */ 587 */
588 for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) { 588 for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
589 /* Disable preemption to make it very likely to 589 /*
590 * succeed in the first iteration even on PREEMPT_RT kernel. 590 * Get vertical and horizontal scanout position vpos, hpos,
591 * and bounding timestamps stime, etime, pre/post query.
591 */ 592 */
592 preempt_disable(); 593 vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos,
594 &hpos, &stime, &etime);
593 595
594 /* Get system timestamp before query. */ 596 /*
595 stime = ktime_get(); 597 * Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
596 598 * CLOCK_REALTIME is requested.
597 /* Get vertical and horizontal scanout pos. vpos, hpos. */ 599 */
598 vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
599
600 /* Get system timestamp after query. */
601 etime = ktime_get();
602 if (!drm_timestamp_monotonic) 600 if (!drm_timestamp_monotonic)
603 mono_time_offset = ktime_get_monotonic_offset(); 601 mono_time_offset = ktime_get_monotonic_offset();
604 602
605 preempt_enable();
606
607 /* Return as no-op if scanout query unsupported or failed. */ 603 /* Return as no-op if scanout query unsupported or failed. */
608 if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { 604 if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
609 DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n", 605 DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
@@ -611,6 +607,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
611 return -EIO; 607 return -EIO;
612 } 608 }
613 609
610 /* Compute uncertainty in timestamp of scanout position query. */
614 duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime); 611 duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
615 612
616 /* Accept result with < max_error nsecs timing uncertainty. */ 613 /* Accept result with < max_error nsecs timing uncertainty. */
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index b0733153dfd2..85071a1c4547 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1041,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
1041 /* if equal delete the probed mode */ 1041 /* if equal delete the probed mode */
1042 mode->status = pmode->status; 1042 mode->status = pmode->status;
1043 /* Merge type bits together */ 1043 /* Merge type bits together */
1044 mode->type |= pmode->type; 1044 mode->type = pmode->type;
1045 list_del(&pmode->head); 1045 list_del(&pmode->head);
1046 drm_mode_destroy(connector->dev, pmode); 1046 drm_mode_destroy(connector->dev, pmode);
1047 break; 1047 break;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index f00d7a9671ea..02679793c9e2 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -80,7 +80,7 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
80 /* Reserve */ 80 /* Reserve */
81 for (addr = (unsigned long)dmah->vaddr, sz = size; 81 for (addr = (unsigned long)dmah->vaddr, sz = size;
82 sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { 82 sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
83 SetPageReserved(virt_to_page(addr)); 83 SetPageReserved(virt_to_page((void *)addr));
84 } 84 }
85 85
86 return dmah; 86 return dmah;
@@ -103,7 +103,7 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
103 /* Unreserve */ 103 /* Unreserve */
104 for (addr = (unsigned long)dmah->vaddr, sz = dmah->size; 104 for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
105 sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { 105 sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
106 ClearPageReserved(virt_to_page(addr)); 106 ClearPageReserved(virt_to_page((void *)addr));
107 } 107 }
108 dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr, 108 dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
109 dmah->busaddr); 109 dmah->busaddr);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 26055abf94ee..c200136a5d8e 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -255,16 +255,20 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
255} 255}
256 256
257/** 257/**
258 * Get a secondary minor number. 258 * drm_get_minor - Allocate and register new DRM minor
259 * @dev: DRM device
260 * @minor: Pointer to where new minor is stored
261 * @type: Type of minor
259 * 262 *
260 * \param dev device data structure 263 * Allocate a new minor of the given type and register it. A pointer to the new
261 * \param sec-minor structure to hold the assigned minor 264 * minor is returned in @minor.
262 * \return negative number on failure. 265 * Caller must hold the global DRM mutex.
263 * 266 *
264 * Search an empty entry and initialize it to the given parameters. This 267 * RETURNS:
265 * routines assigns minor numbers to secondary heads of multi-headed cards 268 * 0 on success, negative error code on failure.
266 */ 269 */
267int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) 270static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor,
271 int type)
268{ 272{
269 struct drm_minor *new_minor; 273 struct drm_minor *new_minor;
270 int ret; 274 int ret;
@@ -321,37 +325,48 @@ err_idr:
321 *minor = NULL; 325 *minor = NULL;
322 return ret; 326 return ret;
323} 327}
324EXPORT_SYMBOL(drm_get_minor);
325 328
326/** 329/**
327 * Put a secondary minor number. 330 * drm_unplug_minor - Unplug DRM minor
331 * @minor: Minor to unplug
328 * 332 *
329 * \param sec_minor - structure to be released 333 * Unplugs the given DRM minor but keeps the object. So after this returns,
330 * \return always zero 334 * minor->dev is still valid so existing open-files can still access it to get
335 * device information from their drm_file ojects.
336 * If the minor is already unplugged or if @minor is NULL, nothing is done.
337 * The global DRM mutex must be held by the caller.
331 */ 338 */
332int drm_put_minor(struct drm_minor **minor_p) 339static void drm_unplug_minor(struct drm_minor *minor)
333{ 340{
334 struct drm_minor *minor = *minor_p; 341 if (!minor || !device_is_registered(minor->kdev))
335 342 return;
336 DRM_DEBUG("release secondary minor %d\n", minor->index);
337 343
338#if defined(CONFIG_DEBUG_FS) 344#if defined(CONFIG_DEBUG_FS)
339 drm_debugfs_cleanup(minor); 345 drm_debugfs_cleanup(minor);
340#endif 346#endif
341 347
342 drm_sysfs_device_remove(minor); 348 drm_sysfs_device_remove(minor);
343
344 idr_remove(&drm_minors_idr, minor->index); 349 idr_remove(&drm_minors_idr, minor->index);
345
346 kfree(minor);
347 *minor_p = NULL;
348 return 0;
349} 350}
350EXPORT_SYMBOL(drm_put_minor);
351 351
352static void drm_unplug_minor(struct drm_minor *minor) 352/**
353 * drm_put_minor - Destroy DRM minor
354 * @minor: Minor to destroy
355 *
356 * This calls drm_unplug_minor() on the given minor and then frees it. Nothing
357 * is done if @minor is NULL. It is fine to call this on already unplugged
358 * minors.
359 * The global DRM mutex must be held by the caller.
360 */
361static void drm_put_minor(struct drm_minor *minor)
353{ 362{
354 drm_sysfs_device_remove(minor); 363 if (!minor)
364 return;
365
366 DRM_DEBUG("release secondary minor %d\n", minor->index);
367
368 drm_unplug_minor(minor);
369 kfree(minor);
355} 370}
356 371
357/** 372/**
@@ -472,6 +487,10 @@ EXPORT_SYMBOL(drm_dev_alloc);
472 */ 487 */
473void drm_dev_free(struct drm_device *dev) 488void drm_dev_free(struct drm_device *dev)
474{ 489{
490 drm_put_minor(dev->control);
491 drm_put_minor(dev->render);
492 drm_put_minor(dev->primary);
493
475 if (dev->driver->driver_features & DRIVER_GEM) 494 if (dev->driver->driver_features & DRIVER_GEM)
476 drm_gem_destroy(dev); 495 drm_gem_destroy(dev);
477 496
@@ -547,13 +566,11 @@ err_unload:
547 if (dev->driver->unload) 566 if (dev->driver->unload)
548 dev->driver->unload(dev); 567 dev->driver->unload(dev);
549err_primary_node: 568err_primary_node:
550 drm_put_minor(&dev->primary); 569 drm_put_minor(dev->primary);
551err_render_node: 570err_render_node:
552 if (dev->render) 571 drm_put_minor(dev->render);
553 drm_put_minor(&dev->render);
554err_control_node: 572err_control_node:
555 if (dev->control) 573 drm_put_minor(dev->control);
556 drm_put_minor(&dev->control);
557err_agp: 574err_agp:
558 if (dev->driver->bus->agp_destroy) 575 if (dev->driver->bus->agp_destroy)
559 dev->driver->bus->agp_destroy(dev); 576 dev->driver->bus->agp_destroy(dev);
@@ -588,11 +605,9 @@ void drm_dev_unregister(struct drm_device *dev)
588 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) 605 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
589 drm_rmmap(dev, r_list->map); 606 drm_rmmap(dev, r_list->map);
590 607
591 if (dev->control) 608 drm_unplug_minor(dev->control);
592 drm_put_minor(&dev->control); 609 drm_unplug_minor(dev->render);
593 if (dev->render) 610 drm_unplug_minor(dev->primary);
594 drm_put_minor(&dev->render);
595 drm_put_minor(&dev->primary);
596 611
597 list_del(&dev->driver_item); 612 list_del(&dev->driver_item);
598} 613}
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 2290b3b73832..1a35ea53106b 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -22,8 +22,8 @@
22#include <drm/drm_core.h> 22#include <drm/drm_core.h>
23#include <drm/drmP.h> 23#include <drm/drmP.h>
24 24
25#define to_drm_minor(d) container_of(d, struct drm_minor, kdev) 25#define to_drm_minor(d) dev_get_drvdata(d)
26#define to_drm_connector(d) container_of(d, struct drm_connector, kdev) 26#define to_drm_connector(d) dev_get_drvdata(d)
27 27
28static struct device_type drm_sysfs_device_minor = { 28static struct device_type drm_sysfs_device_minor = {
29 .name = "drm_minor" 29 .name = "drm_minor"
@@ -162,20 +162,6 @@ void drm_sysfs_destroy(void)
162 drm_class = NULL; 162 drm_class = NULL;
163} 163}
164 164
165/**
166 * drm_sysfs_device_release - do nothing
167 * @dev: Linux device
168 *
169 * Normally, this would free the DRM device associated with @dev, along
170 * with cleaning up any other stuff. But we do that in the DRM core, so
171 * this function can just return and hope that the core does its job.
172 */
173static void drm_sysfs_device_release(struct device *dev)
174{
175 memset(dev, 0, sizeof(struct device));
176 return;
177}
178
179/* 165/*
180 * Connector properties 166 * Connector properties
181 */ 167 */
@@ -380,11 +366,6 @@ static struct bin_attribute edid_attr = {
380 * properties (so far, connection status, dpms, mode list & edid) and 366 * properties (so far, connection status, dpms, mode list & edid) and
381 * generate a hotplug event so userspace knows there's a new connector 367 * generate a hotplug event so userspace knows there's a new connector
382 * available. 368 * available.
383 *
384 * Note:
385 * This routine should only be called *once* for each registered connector.
386 * A second call for an already registered connector will trigger the BUG_ON
387 * below.
388 */ 369 */
389int drm_sysfs_connector_add(struct drm_connector *connector) 370int drm_sysfs_connector_add(struct drm_connector *connector)
390{ 371{
@@ -394,29 +375,25 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
394 int i; 375 int i;
395 int ret; 376 int ret;
396 377
397 /* We shouldn't get called more than once for the same connector */ 378 if (connector->kdev)
398 BUG_ON(device_is_registered(&connector->kdev)); 379 return 0;
399
400 connector->kdev.parent = &dev->primary->kdev;
401 connector->kdev.class = drm_class;
402 connector->kdev.release = drm_sysfs_device_release;
403 380
381 connector->kdev = device_create(drm_class, dev->primary->kdev,
382 0, connector, "card%d-%s",
383 dev->primary->index, drm_get_connector_name(connector));
404 DRM_DEBUG("adding \"%s\" to sysfs\n", 384 DRM_DEBUG("adding \"%s\" to sysfs\n",
405 drm_get_connector_name(connector)); 385 drm_get_connector_name(connector));
406 386
407 dev_set_name(&connector->kdev, "card%d-%s", 387 if (IS_ERR(connector->kdev)) {
408 dev->primary->index, drm_get_connector_name(connector)); 388 DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
409 ret = device_register(&connector->kdev); 389 ret = PTR_ERR(connector->kdev);
410
411 if (ret) {
412 DRM_ERROR("failed to register connector device: %d\n", ret);
413 goto out; 390 goto out;
414 } 391 }
415 392
416 /* Standard attributes */ 393 /* Standard attributes */
417 394
418 for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) { 395 for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
419 ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]); 396 ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]);
420 if (ret) 397 if (ret)
421 goto err_out_files; 398 goto err_out_files;
422 } 399 }
@@ -433,7 +410,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
433 case DRM_MODE_CONNECTOR_Component: 410 case DRM_MODE_CONNECTOR_Component:
434 case DRM_MODE_CONNECTOR_TV: 411 case DRM_MODE_CONNECTOR_TV:
435 for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) { 412 for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
436 ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]); 413 ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]);
437 if (ret) 414 if (ret)
438 goto err_out_files; 415 goto err_out_files;
439 } 416 }
@@ -442,7 +419,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
442 break; 419 break;
443 } 420 }
444 421
445 ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr); 422 ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr);
446 if (ret) 423 if (ret)
447 goto err_out_files; 424 goto err_out_files;
448 425
@@ -453,10 +430,10 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
453 430
454err_out_files: 431err_out_files:
455 for (i = 0; i < opt_cnt; i++) 432 for (i = 0; i < opt_cnt; i++)
456 device_remove_file(&connector->kdev, &connector_attrs_opt1[i]); 433 device_remove_file(connector->kdev, &connector_attrs_opt1[i]);
457 for (i = 0; i < attr_cnt; i++) 434 for (i = 0; i < attr_cnt; i++)
458 device_remove_file(&connector->kdev, &connector_attrs[i]); 435 device_remove_file(connector->kdev, &connector_attrs[i]);
459 device_unregister(&connector->kdev); 436 device_unregister(connector->kdev);
460 437
461out: 438out:
462 return ret; 439 return ret;
@@ -480,16 +457,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
480{ 457{
481 int i; 458 int i;
482 459
483 if (!connector->kdev.parent) 460 if (!connector->kdev)
484 return; 461 return;
485 DRM_DEBUG("removing \"%s\" from sysfs\n", 462 DRM_DEBUG("removing \"%s\" from sysfs\n",
486 drm_get_connector_name(connector)); 463 drm_get_connector_name(connector));
487 464
488 for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) 465 for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
489 device_remove_file(&connector->kdev, &connector_attrs[i]); 466 device_remove_file(connector->kdev, &connector_attrs[i]);
490 sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr); 467 sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr);
491 device_unregister(&connector->kdev); 468 device_unregister(connector->kdev);
492 connector->kdev.parent = NULL; 469 connector->kdev = NULL;
493} 470}
494EXPORT_SYMBOL(drm_sysfs_connector_remove); 471EXPORT_SYMBOL(drm_sysfs_connector_remove);
495 472
@@ -508,7 +485,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
508 485
509 DRM_DEBUG("generating hotplug event\n"); 486 DRM_DEBUG("generating hotplug event\n");
510 487
511 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); 488 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
512} 489}
513EXPORT_SYMBOL(drm_sysfs_hotplug_event); 490EXPORT_SYMBOL(drm_sysfs_hotplug_event);
514 491
@@ -523,15 +500,8 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
523 */ 500 */
524int drm_sysfs_device_add(struct drm_minor *minor) 501int drm_sysfs_device_add(struct drm_minor *minor)
525{ 502{
526 int err;
527 char *minor_str; 503 char *minor_str;
528 504
529 minor->kdev.parent = minor->dev->dev;
530
531 minor->kdev.class = drm_class;
532 minor->kdev.release = drm_sysfs_device_release;
533 minor->kdev.devt = minor->device;
534 minor->kdev.type = &drm_sysfs_device_minor;
535 if (minor->type == DRM_MINOR_CONTROL) 505 if (minor->type == DRM_MINOR_CONTROL)
536 minor_str = "controlD%d"; 506 minor_str = "controlD%d";
537 else if (minor->type == DRM_MINOR_RENDER) 507 else if (minor->type == DRM_MINOR_RENDER)
@@ -539,18 +509,14 @@ int drm_sysfs_device_add(struct drm_minor *minor)
539 else 509 else
540 minor_str = "card%d"; 510 minor_str = "card%d";
541 511
542 dev_set_name(&minor->kdev, minor_str, minor->index); 512 minor->kdev = device_create(drm_class, minor->dev->dev,
543 513 MKDEV(DRM_MAJOR, minor->index),
544 err = device_register(&minor->kdev); 514 minor, minor_str, minor->index);
545 if (err) { 515 if (IS_ERR(minor->kdev)) {
546 DRM_ERROR("device add failed: %d\n", err); 516 DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
547 goto err_out; 517 return PTR_ERR(minor->kdev);
548 } 518 }
549
550 return 0; 519 return 0;
551
552err_out:
553 return err;
554} 520}
555 521
556/** 522/**
@@ -562,9 +528,9 @@ err_out:
562 */ 528 */
563void drm_sysfs_device_remove(struct drm_minor *minor) 529void drm_sysfs_device_remove(struct drm_minor *minor)
564{ 530{
565 if (minor->kdev.parent) 531 if (minor->kdev)
566 device_unregister(&minor->kdev); 532 device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));
567 minor->kdev.parent = NULL; 533 minor->kdev = NULL;
568} 534}
569 535
570 536
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index b5c5af7328df..93e95d7efd57 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -301,7 +301,7 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
301 301
302 offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ 302 offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
303 page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */ 303 page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
304 page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK)))); 304 page = virt_to_page((void *)dma->pagelist[page_nr]);
305 305
306 get_page(page); 306 get_page(page);
307 vmf->page = page; 307 vmf->page = page;
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 162f686c532d..5a9a6a3063a8 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -634,6 +634,7 @@ const struct psb_ops cdv_chip_ops = {
634 .crtcs = 2, 634 .crtcs = 2,
635 .hdmi_mask = (1 << 0) | (1 << 1), 635 .hdmi_mask = (1 << 0) | (1 << 1),
636 .lvds_mask = (1 << 1), 636 .lvds_mask = (1 << 1),
637 .sdvo_mask = (1 << 0),
637 .cursor_needs_phys = 0, 638 .cursor_needs_phys = 0,
638 .sgx_offset = MRST_SGX_OFFSET, 639 .sgx_offset = MRST_SGX_OFFSET,
639 .chip_setup = cdv_chip_setup, 640 .chip_setup = cdv_chip_setup,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index f4eb43573cad..f88a1815d87c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -666,7 +666,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
666 strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 666 strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
667 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 667 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
668 intel_dp->adapter.algo_data = &intel_dp->algo; 668 intel_dp->adapter.algo_data = &intel_dp->algo;
669 intel_dp->adapter.dev.parent = &connector->base.kdev; 669 intel_dp->adapter.dev.parent = connector->base.kdev;
670 670
671 if (is_edp(encoder)) 671 if (is_edp(encoder))
672 cdv_intel_edp_panel_vdd_on(encoder); 672 cdv_intel_edp_panel_vdd_on(encoder);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 01dd7d225762..94b3fec22c28 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -714,7 +714,7 @@ static void psb_setup_outputs(struct drm_device *dev)
714 clone_mask = (1 << INTEL_OUTPUT_ANALOG); 714 clone_mask = (1 << INTEL_OUTPUT_ANALOG);
715 break; 715 break;
716 case INTEL_OUTPUT_SDVO: 716 case INTEL_OUTPUT_SDVO:
717 crtc_mask = ((1 << 0) | (1 << 1)); 717 crtc_mask = dev_priv->ops->sdvo_mask;
718 clone_mask = (1 << INTEL_OUTPUT_SDVO); 718 clone_mask = (1 << INTEL_OUTPUT_SDVO);
719 break; 719 break;
720 case INTEL_OUTPUT_LVDS: 720 case INTEL_OUTPUT_LVDS:
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index 62cd42e88f28..566d330aaeea 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -51,6 +51,9 @@
51#define wait_for(COND, MS) _wait_for(COND, MS, 1) 51#define wait_for(COND, MS) _wait_for(COND, MS, 1)
52#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) 52#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
53 53
54#define GMBUS_REG_READ(reg) ioread32(dev_priv->gmbus_reg + (reg))
55#define GMBUS_REG_WRITE(reg, val) iowrite32((val), dev_priv->gmbus_reg + (reg))
56
54/* Intel GPIO access functions */ 57/* Intel GPIO access functions */
55 58
56#define I2C_RISEFALL_TIME 20 59#define I2C_RISEFALL_TIME 20
@@ -71,7 +74,8 @@ struct intel_gpio {
71void 74void
72gma_intel_i2c_reset(struct drm_device *dev) 75gma_intel_i2c_reset(struct drm_device *dev)
73{ 76{
74 REG_WRITE(GMBUS0, 0); 77 struct drm_psb_private *dev_priv = dev->dev_private;
78 GMBUS_REG_WRITE(GMBUS0, 0);
75} 79}
76 80
77static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable) 81static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
@@ -98,11 +102,10 @@ static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
98static u32 get_reserved(struct intel_gpio *gpio) 102static u32 get_reserved(struct intel_gpio *gpio)
99{ 103{
100 struct drm_psb_private *dev_priv = gpio->dev_priv; 104 struct drm_psb_private *dev_priv = gpio->dev_priv;
101 struct drm_device *dev = dev_priv->dev;
102 u32 reserved = 0; 105 u32 reserved = 0;
103 106
104 /* On most chips, these bits must be preserved in software. */ 107 /* On most chips, these bits must be preserved in software. */
105 reserved = REG_READ(gpio->reg) & 108 reserved = GMBUS_REG_READ(gpio->reg) &
106 (GPIO_DATA_PULLUP_DISABLE | 109 (GPIO_DATA_PULLUP_DISABLE |
107 GPIO_CLOCK_PULLUP_DISABLE); 110 GPIO_CLOCK_PULLUP_DISABLE);
108 111
@@ -113,29 +116,26 @@ static int get_clock(void *data)
113{ 116{
114 struct intel_gpio *gpio = data; 117 struct intel_gpio *gpio = data;
115 struct drm_psb_private *dev_priv = gpio->dev_priv; 118 struct drm_psb_private *dev_priv = gpio->dev_priv;
116 struct drm_device *dev = dev_priv->dev;
117 u32 reserved = get_reserved(gpio); 119 u32 reserved = get_reserved(gpio);
118 REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK); 120 GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
119 REG_WRITE(gpio->reg, reserved); 121 GMBUS_REG_WRITE(gpio->reg, reserved);
120 return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; 122 return (GMBUS_REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
121} 123}
122 124
123static int get_data(void *data) 125static int get_data(void *data)
124{ 126{
125 struct intel_gpio *gpio = data; 127 struct intel_gpio *gpio = data;
126 struct drm_psb_private *dev_priv = gpio->dev_priv; 128 struct drm_psb_private *dev_priv = gpio->dev_priv;
127 struct drm_device *dev = dev_priv->dev;
128 u32 reserved = get_reserved(gpio); 129 u32 reserved = get_reserved(gpio);
129 REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK); 130 GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
130 REG_WRITE(gpio->reg, reserved); 131 GMBUS_REG_WRITE(gpio->reg, reserved);
131 return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0; 132 return (GMBUS_REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
132} 133}
133 134
134static void set_clock(void *data, int state_high) 135static void set_clock(void *data, int state_high)
135{ 136{
136 struct intel_gpio *gpio = data; 137 struct intel_gpio *gpio = data;
137 struct drm_psb_private *dev_priv = gpio->dev_priv; 138 struct drm_psb_private *dev_priv = gpio->dev_priv;
138 struct drm_device *dev = dev_priv->dev;
139 u32 reserved = get_reserved(gpio); 139 u32 reserved = get_reserved(gpio);
140 u32 clock_bits; 140 u32 clock_bits;
141 141
@@ -145,15 +145,14 @@ static void set_clock(void *data, int state_high)
145 clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | 145 clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
146 GPIO_CLOCK_VAL_MASK; 146 GPIO_CLOCK_VAL_MASK;
147 147
148 REG_WRITE(gpio->reg, reserved | clock_bits); 148 GMBUS_REG_WRITE(gpio->reg, reserved | clock_bits);
149 REG_READ(gpio->reg); /* Posting */ 149 GMBUS_REG_READ(gpio->reg); /* Posting */
150} 150}
151 151
152static void set_data(void *data, int state_high) 152static void set_data(void *data, int state_high)
153{ 153{
154 struct intel_gpio *gpio = data; 154 struct intel_gpio *gpio = data;
155 struct drm_psb_private *dev_priv = gpio->dev_priv; 155 struct drm_psb_private *dev_priv = gpio->dev_priv;
156 struct drm_device *dev = dev_priv->dev;
157 u32 reserved = get_reserved(gpio); 156 u32 reserved = get_reserved(gpio);
158 u32 data_bits; 157 u32 data_bits;
159 158
@@ -163,8 +162,8 @@ static void set_data(void *data, int state_high)
163 data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | 162 data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
164 GPIO_DATA_VAL_MASK; 163 GPIO_DATA_VAL_MASK;
165 164
166 REG_WRITE(gpio->reg, reserved | data_bits); 165 GMBUS_REG_WRITE(gpio->reg, reserved | data_bits);
167 REG_READ(gpio->reg); 166 GMBUS_REG_READ(gpio->reg);
168} 167}
169 168
170static struct i2c_adapter * 169static struct i2c_adapter *
@@ -251,7 +250,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
251 struct intel_gmbus, 250 struct intel_gmbus,
252 adapter); 251 adapter);
253 struct drm_psb_private *dev_priv = adapter->algo_data; 252 struct drm_psb_private *dev_priv = adapter->algo_data;
254 struct drm_device *dev = dev_priv->dev;
255 int i, reg_offset; 253 int i, reg_offset;
256 254
257 if (bus->force_bit) 255 if (bus->force_bit)
@@ -260,28 +258,30 @@ gmbus_xfer(struct i2c_adapter *adapter,
260 258
261 reg_offset = 0; 259 reg_offset = 0;
262 260
263 REG_WRITE(GMBUS0 + reg_offset, bus->reg0); 261 GMBUS_REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
264 262
265 for (i = 0; i < num; i++) { 263 for (i = 0; i < num; i++) {
266 u16 len = msgs[i].len; 264 u16 len = msgs[i].len;
267 u8 *buf = msgs[i].buf; 265 u8 *buf = msgs[i].buf;
268 266
269 if (msgs[i].flags & I2C_M_RD) { 267 if (msgs[i].flags & I2C_M_RD) {
270 REG_WRITE(GMBUS1 + reg_offset, 268 GMBUS_REG_WRITE(GMBUS1 + reg_offset,
271 GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) | 269 GMBUS_CYCLE_WAIT |
272 (len << GMBUS_BYTE_COUNT_SHIFT) | 270 (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
273 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | 271 (len << GMBUS_BYTE_COUNT_SHIFT) |
274 GMBUS_SLAVE_READ | GMBUS_SW_RDY); 272 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
275 REG_READ(GMBUS2+reg_offset); 273 GMBUS_SLAVE_READ | GMBUS_SW_RDY);
274 GMBUS_REG_READ(GMBUS2+reg_offset);
276 do { 275 do {
277 u32 val, loop = 0; 276 u32 val, loop = 0;
278 277
279 if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) 278 if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
279 (GMBUS_SATOER | GMBUS_HW_RDY), 50))
280 goto timeout; 280 goto timeout;
281 if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) 281 if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
282 goto clear_err; 282 goto clear_err;
283 283
284 val = REG_READ(GMBUS3 + reg_offset); 284 val = GMBUS_REG_READ(GMBUS3 + reg_offset);
285 do { 285 do {
286 *buf++ = val & 0xff; 286 *buf++ = val & 0xff;
287 val >>= 8; 287 val >>= 8;
@@ -295,18 +295,20 @@ gmbus_xfer(struct i2c_adapter *adapter,
295 val |= *buf++ << (8 * loop); 295 val |= *buf++ << (8 * loop);
296 } while (--len && ++loop < 4); 296 } while (--len && ++loop < 4);
297 297
298 REG_WRITE(GMBUS3 + reg_offset, val); 298 GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
299 REG_WRITE(GMBUS1 + reg_offset, 299 GMBUS_REG_WRITE(GMBUS1 + reg_offset,
300 (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) | 300 (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
301 (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) | 301 (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
302 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | 302 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
303 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); 303 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
304 REG_READ(GMBUS2+reg_offset); 304 GMBUS_REG_READ(GMBUS2+reg_offset);
305 305
306 while (len) { 306 while (len) {
307 if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) 307 if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
308 (GMBUS_SATOER | GMBUS_HW_RDY), 50))
308 goto timeout; 309 goto timeout;
309 if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) 310 if (GMBUS_REG_READ(GMBUS2 + reg_offset) &
311 GMBUS_SATOER)
310 goto clear_err; 312 goto clear_err;
311 313
312 val = loop = 0; 314 val = loop = 0;
@@ -314,14 +316,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
314 val |= *buf++ << (8 * loop); 316 val |= *buf++ << (8 * loop);
315 } while (--len && ++loop < 4); 317 } while (--len && ++loop < 4);
316 318
317 REG_WRITE(GMBUS3 + reg_offset, val); 319 GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
318 REG_READ(GMBUS2+reg_offset); 320 GMBUS_REG_READ(GMBUS2+reg_offset);
319 } 321 }
320 } 322 }
321 323
322 if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50)) 324 if (i + 1 < num && wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
323 goto timeout; 325 goto timeout;
324 if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) 326 if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
325 goto clear_err; 327 goto clear_err;
326 } 328 }
327 329
@@ -332,20 +334,20 @@ clear_err:
332 * of resetting the GMBUS controller and so clearing the 334 * of resetting the GMBUS controller and so clearing the
333 * BUS_ERROR raised by the slave's NAK. 335 * BUS_ERROR raised by the slave's NAK.
334 */ 336 */
335 REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT); 337 GMBUS_REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
336 REG_WRITE(GMBUS1 + reg_offset, 0); 338 GMBUS_REG_WRITE(GMBUS1 + reg_offset, 0);
337 339
338done: 340done:
339 /* Mark the GMBUS interface as disabled. We will re-enable it at the 341 /* Mark the GMBUS interface as disabled. We will re-enable it at the
340 * start of the next xfer, till then let it sleep. 342 * start of the next xfer, till then let it sleep.
341 */ 343 */
342 REG_WRITE(GMBUS0 + reg_offset, 0); 344 GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
343 return i; 345 return i;
344 346
345timeout: 347timeout:
346 DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n", 348 DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
347 bus->reg0 & 0xff, bus->adapter.name); 349 bus->reg0 & 0xff, bus->adapter.name);
348 REG_WRITE(GMBUS0 + reg_offset, 0); 350 GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
349 351
350 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ 352 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
351 bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff); 353 bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
@@ -399,6 +401,11 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
399 if (dev_priv->gmbus == NULL) 401 if (dev_priv->gmbus == NULL)
400 return -ENOMEM; 402 return -ENOMEM;
401 403
404 if (IS_MRST(dev))
405 dev_priv->gmbus_reg = dev_priv->aux_reg;
406 else
407 dev_priv->gmbus_reg = dev_priv->vdc_reg;
408
402 for (i = 0; i < GMBUS_NUM_PORTS; i++) { 409 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
403 struct intel_gmbus *bus = &dev_priv->gmbus[i]; 410 struct intel_gmbus *bus = &dev_priv->gmbus[i];
404 411
@@ -487,6 +494,7 @@ void gma_intel_teardown_gmbus(struct drm_device *dev)
487 i2c_del_adapter(&bus->adapter); 494 i2c_del_adapter(&bus->adapter);
488 } 495 }
489 496
497 dev_priv->gmbus_reg = NULL; /* iounmap is done in driver_unload */
490 kfree(dev_priv->gmbus); 498 kfree(dev_priv->gmbus);
491 dev_priv->gmbus = NULL; 499 dev_priv->gmbus = NULL;
492} 500}
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 54c98962b73e..8195e8592107 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -26,24 +26,10 @@
26#include "gma_display.h" 26#include "gma_display.h"
27#include "power.h" 27#include "power.h"
28 28
29struct psb_intel_range_t { 29#define MRST_LIMIT_LVDS_100L 0
30 int min, max; 30#define MRST_LIMIT_LVDS_83 1
31}; 31#define MRST_LIMIT_LVDS_100 2
32 32#define MRST_LIMIT_SDVO 3
33struct oaktrail_limit_t {
34 struct psb_intel_range_t dot, m, p1;
35};
36
37struct oaktrail_clock_t {
38 /* derived values */
39 int dot;
40 int m;
41 int p1;
42};
43
44#define MRST_LIMIT_LVDS_100L 0
45#define MRST_LIMIT_LVDS_83 1
46#define MRST_LIMIT_LVDS_100 2
47 33
48#define MRST_DOT_MIN 19750 34#define MRST_DOT_MIN 19750
49#define MRST_DOT_MAX 120000 35#define MRST_DOT_MAX 120000
@@ -57,21 +43,40 @@ struct oaktrail_clock_t {
57#define MRST_P1_MAX_0 7 43#define MRST_P1_MAX_0 7
58#define MRST_P1_MAX_1 8 44#define MRST_P1_MAX_1 8
59 45
60static const struct oaktrail_limit_t oaktrail_limits[] = { 46static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
47 struct drm_crtc *crtc, int target,
48 int refclk, struct gma_clock_t *best_clock);
49
50static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
51 struct drm_crtc *crtc, int target,
52 int refclk, struct gma_clock_t *best_clock);
53
54static const struct gma_limit_t mrst_limits[] = {
61 { /* MRST_LIMIT_LVDS_100L */ 55 { /* MRST_LIMIT_LVDS_100L */
62 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX}, 56 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
63 .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L}, 57 .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
64 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1}, 58 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
59 .find_pll = mrst_lvds_find_best_pll,
65 }, 60 },
66 { /* MRST_LIMIT_LVDS_83L */ 61 { /* MRST_LIMIT_LVDS_83L */
67 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX}, 62 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
68 .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83}, 63 .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
69 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0}, 64 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
65 .find_pll = mrst_lvds_find_best_pll,
70 }, 66 },
71 { /* MRST_LIMIT_LVDS_100 */ 67 { /* MRST_LIMIT_LVDS_100 */
72 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX}, 68 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
73 .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100}, 69 .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
74 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1}, 70 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
71 .find_pll = mrst_lvds_find_best_pll,
72 },
73 { /* MRST_LIMIT_SDVO */
74 .vco = {.min = 1400000, .max = 2800000},
75 .n = {.min = 3, .max = 7},
76 .m = {.min = 80, .max = 137},
77 .p1 = {.min = 1, .max = 2},
78 .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 10},
79 .find_pll = mrst_sdvo_find_best_pll,
75 }, 80 },
76}; 81};
77 82
@@ -82,9 +87,10 @@ static const u32 oaktrail_m_converts[] = {
82 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c, 87 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
83}; 88};
84 89
85static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc) 90static const struct gma_limit_t *mrst_limit(struct drm_crtc *crtc,
91 int refclk)
86{ 92{
87 const struct oaktrail_limit_t *limit = NULL; 93 const struct gma_limit_t *limit = NULL;
88 struct drm_device *dev = crtc->dev; 94 struct drm_device *dev = crtc->dev;
89 struct drm_psb_private *dev_priv = dev->dev_private; 95 struct drm_psb_private *dev_priv = dev->dev_private;
90 96
@@ -92,45 +98,100 @@ static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
92 || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) { 98 || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
93 switch (dev_priv->core_freq) { 99 switch (dev_priv->core_freq) {
94 case 100: 100 case 100:
95 limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L]; 101 limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
96 break; 102 break;
97 case 166: 103 case 166:
98 limit = &oaktrail_limits[MRST_LIMIT_LVDS_83]; 104 limit = &mrst_limits[MRST_LIMIT_LVDS_83];
99 break; 105 break;
100 case 200: 106 case 200:
101 limit = &oaktrail_limits[MRST_LIMIT_LVDS_100]; 107 limit = &mrst_limits[MRST_LIMIT_LVDS_100];
102 break; 108 break;
103 } 109 }
110 } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
111 limit = &mrst_limits[MRST_LIMIT_SDVO];
104 } else { 112 } else {
105 limit = NULL; 113 limit = NULL;
106 dev_err(dev->dev, "oaktrail_limit Wrong display type.\n"); 114 dev_err(dev->dev, "mrst_limit Wrong display type.\n");
107 } 115 }
108 116
109 return limit; 117 return limit;
110} 118}
111 119
112/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ 120/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
113static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock) 121static void mrst_lvds_clock(int refclk, struct gma_clock_t *clock)
114{ 122{
115 clock->dot = (refclk * clock->m) / (14 * clock->p1); 123 clock->dot = (refclk * clock->m) / (14 * clock->p1);
116} 124}
117 125
118static void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock) 126static void mrst_print_pll(struct gma_clock_t *clock)
119{ 127{
120 pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n", 128 DRM_DEBUG_DRIVER("dotclock=%d, m=%d, m1=%d, m2=%d, n=%d, p1=%d, p2=%d\n",
121 prefix, clock->dot, clock->m, clock->p1); 129 clock->dot, clock->m, clock->m1, clock->m2, clock->n,
130 clock->p1, clock->p2);
131}
132
133static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
134 struct drm_crtc *crtc, int target,
135 int refclk, struct gma_clock_t *best_clock)
136{
137 struct gma_clock_t clock;
138 u32 target_vco, actual_freq;
139 s32 freq_error, min_error = 100000;
140
141 memset(best_clock, 0, sizeof(*best_clock));
142
143 for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
144 for (clock.n = limit->n.min; clock.n <= limit->n.max;
145 clock.n++) {
146 for (clock.p1 = limit->p1.min;
147 clock.p1 <= limit->p1.max; clock.p1++) {
148 /* p2 value always stored in p2_slow on SDVO */
149 clock.p = clock.p1 * limit->p2.p2_slow;
150 target_vco = target * clock.p;
151
152 /* VCO will increase at this point so break */
153 if (target_vco > limit->vco.max)
154 break;
155
156 if (target_vco < limit->vco.min)
157 continue;
158
159 actual_freq = (refclk * clock.m) /
160 (clock.n * clock.p);
161 freq_error = 10000 -
162 ((target * 10000) / actual_freq);
163
164 if (freq_error < -min_error) {
165 /* freq_error will start to decrease at
166 this point so break */
167 break;
168 }
169
170 if (freq_error < 0)
171 freq_error = -freq_error;
172
173 if (freq_error < min_error) {
174 min_error = freq_error;
175 *best_clock = clock;
176 }
177 }
178 }
179 if (min_error == 0)
180 break;
181 }
182
183 return min_error == 0;
122} 184}
123 185
124/** 186/**
125 * Returns a set of divisors for the desired target clock with the given refclk, 187 * Returns a set of divisors for the desired target clock with the given refclk,
126 * or FALSE. Divisor values are the actual divisors for 188 * or FALSE. Divisor values are the actual divisors for
127 */ 189 */
128static bool 190static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
129mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk, 191 struct drm_crtc *crtc, int target,
130 struct oaktrail_clock_t *best_clock) 192 int refclk, struct gma_clock_t *best_clock)
131{ 193{
132 struct oaktrail_clock_t clock; 194 struct gma_clock_t clock;
133 const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
134 int err = target; 195 int err = target;
135 196
136 memset(best_clock, 0, sizeof(*best_clock)); 197 memset(best_clock, 0, sizeof(*best_clock));
@@ -140,7 +201,7 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
140 clock.p1++) { 201 clock.p1++) {
141 int this_err; 202 int this_err;
142 203
143 oaktrail_clock(refclk, &clock); 204 mrst_lvds_clock(refclk, &clock);
144 205
145 this_err = abs(clock.dot - target); 206 this_err = abs(clock.dot - target);
146 if (this_err < err) { 207 if (this_err < err) {
@@ -149,7 +210,6 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
149 } 210 }
150 } 211 }
151 } 212 }
152 dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
153 return err != target; 213 return err != target;
154} 214}
155 215
@@ -167,8 +227,10 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
167 int pipe = gma_crtc->pipe; 227 int pipe = gma_crtc->pipe;
168 const struct psb_offset *map = &dev_priv->regmap[pipe]; 228 const struct psb_offset *map = &dev_priv->regmap[pipe];
169 u32 temp; 229 u32 temp;
230 int i;
231 int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
170 232
171 if (pipe == 1) { 233 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
172 oaktrail_crtc_hdmi_dpms(crtc, mode); 234 oaktrail_crtc_hdmi_dpms(crtc, mode);
173 return; 235 return;
174 } 236 }
@@ -183,35 +245,45 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
183 case DRM_MODE_DPMS_ON: 245 case DRM_MODE_DPMS_ON:
184 case DRM_MODE_DPMS_STANDBY: 246 case DRM_MODE_DPMS_STANDBY:
185 case DRM_MODE_DPMS_SUSPEND: 247 case DRM_MODE_DPMS_SUSPEND:
186 /* Enable the DPLL */ 248 for (i = 0; i <= need_aux; i++) {
187 temp = REG_READ(map->dpll); 249 /* Enable the DPLL */
188 if ((temp & DPLL_VCO_ENABLE) == 0) { 250 temp = REG_READ_WITH_AUX(map->dpll, i);
189 REG_WRITE(map->dpll, temp); 251 if ((temp & DPLL_VCO_ENABLE) == 0) {
190 REG_READ(map->dpll); 252 REG_WRITE_WITH_AUX(map->dpll, temp, i);
191 /* Wait for the clocks to stabilize. */ 253 REG_READ_WITH_AUX(map->dpll, i);
192 udelay(150); 254 /* Wait for the clocks to stabilize. */
193 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); 255 udelay(150);
194 REG_READ(map->dpll); 256 REG_WRITE_WITH_AUX(map->dpll,
195 /* Wait for the clocks to stabilize. */ 257 temp | DPLL_VCO_ENABLE, i);
196 udelay(150); 258 REG_READ_WITH_AUX(map->dpll, i);
197 REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); 259 /* Wait for the clocks to stabilize. */
198 REG_READ(map->dpll); 260 udelay(150);
199 /* Wait for the clocks to stabilize. */ 261 REG_WRITE_WITH_AUX(map->dpll,
200 udelay(150); 262 temp | DPLL_VCO_ENABLE, i);
201 } 263 REG_READ_WITH_AUX(map->dpll, i);
202 /* Enable the pipe */ 264 /* Wait for the clocks to stabilize. */
203 temp = REG_READ(map->conf); 265 udelay(150);
204 if ((temp & PIPEACONF_ENABLE) == 0) 266 }
205 REG_WRITE(map->conf, temp | PIPEACONF_ENABLE); 267
206 /* Enable the plane */ 268 /* Enable the pipe */
207 temp = REG_READ(map->cntr); 269 temp = REG_READ_WITH_AUX(map->conf, i);
208 if ((temp & DISPLAY_PLANE_ENABLE) == 0) { 270 if ((temp & PIPEACONF_ENABLE) == 0) {
209 REG_WRITE(map->cntr, 271 REG_WRITE_WITH_AUX(map->conf,
210 temp | DISPLAY_PLANE_ENABLE); 272 temp | PIPEACONF_ENABLE, i);
211 /* Flush the plane changes */ 273 }
212 REG_WRITE(map->base, REG_READ(map->base)); 274
213 } 275 /* Enable the plane */
276 temp = REG_READ_WITH_AUX(map->cntr, i);
277 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
278 REG_WRITE_WITH_AUX(map->cntr,
279 temp | DISPLAY_PLANE_ENABLE,
280 i);
281 /* Flush the plane changes */
282 REG_WRITE_WITH_AUX(map->base,
283 REG_READ_WITH_AUX(map->base, i), i);
284 }
214 285
286 }
215 gma_crtc_load_lut(crtc); 287 gma_crtc_load_lut(crtc);
216 288
217 /* Give the overlay scaler a chance to enable 289 /* Give the overlay scaler a chance to enable
@@ -223,48 +295,52 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
223 * if it's on this pipe */ 295 * if it's on this pipe */
224 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */ 296 /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
225 297
226 /* Disable the VGA plane that we never use */ 298 for (i = 0; i <= need_aux; i++) {
227 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); 299 /* Disable the VGA plane that we never use */
228 /* Disable display plane */ 300 REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
229 temp = REG_READ(map->cntr); 301 /* Disable display plane */
230 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 302 temp = REG_READ_WITH_AUX(map->cntr, i);
231 REG_WRITE(map->cntr, 303 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
232 temp & ~DISPLAY_PLANE_ENABLE); 304 REG_WRITE_WITH_AUX(map->cntr,
233 /* Flush the plane changes */ 305 temp & ~DISPLAY_PLANE_ENABLE, i);
234 REG_WRITE(map->base, REG_READ(map->base)); 306 /* Flush the plane changes */
235 REG_READ(map->base); 307 REG_WRITE_WITH_AUX(map->base,
236 } 308 REG_READ(map->base), i);
309 REG_READ_WITH_AUX(map->base, i);
310 }
237 311
238 /* Next, disable display pipes */ 312 /* Next, disable display pipes */
239 temp = REG_READ(map->conf); 313 temp = REG_READ_WITH_AUX(map->conf, i);
240 if ((temp & PIPEACONF_ENABLE) != 0) { 314 if ((temp & PIPEACONF_ENABLE) != 0) {
241 REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE); 315 REG_WRITE_WITH_AUX(map->conf,
242 REG_READ(map->conf); 316 temp & ~PIPEACONF_ENABLE, i);
243 } 317 REG_READ_WITH_AUX(map->conf, i);
244 /* Wait for for the pipe disable to take effect. */ 318 }
245 gma_wait_for_vblank(dev); 319 /* Wait for for the pipe disable to take effect. */
320 gma_wait_for_vblank(dev);
321
322 temp = REG_READ_WITH_AUX(map->dpll, i);
323 if ((temp & DPLL_VCO_ENABLE) != 0) {
324 REG_WRITE_WITH_AUX(map->dpll,
325 temp & ~DPLL_VCO_ENABLE, i);
326 REG_READ_WITH_AUX(map->dpll, i);
327 }
246 328
247 temp = REG_READ(map->dpll); 329 /* Wait for the clocks to turn off. */
248 if ((temp & DPLL_VCO_ENABLE) != 0) { 330 udelay(150);
249 REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
250 REG_READ(map->dpll);
251 } 331 }
252
253 /* Wait for the clocks to turn off. */
254 udelay(150);
255 break; 332 break;
256 } 333 }
257 334
258 /*Set FIFO Watermarks*/ 335 /* Set FIFO Watermarks (values taken from EMGD) */
259 REG_WRITE(DSPARB, 0x3FFF); 336 REG_WRITE(DSPARB, 0x3f80);
260 REG_WRITE(DSPFW1, 0x3F88080A); 337 REG_WRITE(DSPFW1, 0x3f8f0404);
261 REG_WRITE(DSPFW2, 0x0b060808); 338 REG_WRITE(DSPFW2, 0x04040f04);
262 REG_WRITE(DSPFW3, 0x0); 339 REG_WRITE(DSPFW3, 0x0);
263 REG_WRITE(DSPFW4, 0x08030404); 340 REG_WRITE(DSPFW4, 0x04040404);
264 REG_WRITE(DSPFW5, 0x04040404); 341 REG_WRITE(DSPFW5, 0x04040404);
265 REG_WRITE(DSPFW6, 0x78); 342 REG_WRITE(DSPFW6, 0x78);
266 REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000); 343 REG_WRITE(DSPCHICKENBIT, REG_READ(DSPCHICKENBIT) | 0xc040);
267 /* Must write Bit 14 of the Chicken Bit Register */
268 344
269 gma_power_end(dev); 345 gma_power_end(dev);
270} 346}
@@ -297,7 +373,8 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
297 int pipe = gma_crtc->pipe; 373 int pipe = gma_crtc->pipe;
298 const struct psb_offset *map = &dev_priv->regmap[pipe]; 374 const struct psb_offset *map = &dev_priv->regmap[pipe];
299 int refclk = 0; 375 int refclk = 0;
300 struct oaktrail_clock_t clock; 376 struct gma_clock_t clock;
377 const struct gma_limit_t *limit;
301 u32 dpll = 0, fp = 0, dspcntr, pipeconf; 378 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
302 bool ok, is_sdvo = false; 379 bool ok, is_sdvo = false;
303 bool is_lvds = false; 380 bool is_lvds = false;
@@ -306,8 +383,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
306 struct gma_encoder *gma_encoder = NULL; 383 struct gma_encoder *gma_encoder = NULL;
307 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; 384 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
308 struct drm_connector *connector; 385 struct drm_connector *connector;
386 int i;
387 int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
309 388
310 if (pipe == 1) 389 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
311 return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb); 390 return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
312 391
313 if (!gma_power_begin(dev, true)) 392 if (!gma_power_begin(dev, true))
@@ -340,15 +419,17 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
340 } 419 }
341 420
342 /* Disable the VGA plane that we never use */ 421 /* Disable the VGA plane that we never use */
343 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); 422 for (i = 0; i <= need_aux; i++)
423 REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
344 424
345 /* Disable the panel fitter if it was on our pipe */ 425 /* Disable the panel fitter if it was on our pipe */
346 if (oaktrail_panel_fitter_pipe(dev) == pipe) 426 if (oaktrail_panel_fitter_pipe(dev) == pipe)
347 REG_WRITE(PFIT_CONTROL, 0); 427 REG_WRITE(PFIT_CONTROL, 0);
348 428
349 REG_WRITE(map->src, 429 for (i = 0; i <= need_aux; i++) {
350 ((mode->crtc_hdisplay - 1) << 16) | 430 REG_WRITE_WITH_AUX(map->src, ((mode->crtc_hdisplay - 1) << 16) |
351 (mode->crtc_vdisplay - 1)); 431 (mode->crtc_vdisplay - 1), i);
432 }
352 433
353 if (gma_encoder) 434 if (gma_encoder)
354 drm_object_property_get_value(&connector->base, 435 drm_object_property_get_value(&connector->base,
@@ -365,35 +446,39 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
365 offsetY = (adjusted_mode->crtc_vdisplay - 446 offsetY = (adjusted_mode->crtc_vdisplay -
366 mode->crtc_vdisplay) / 2; 447 mode->crtc_vdisplay) / 2;
367 448
368 REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) | 449 for (i = 0; i <= need_aux; i++) {
369 ((adjusted_mode->crtc_htotal - 1) << 16)); 450 REG_WRITE_WITH_AUX(map->htotal, (mode->crtc_hdisplay - 1) |
370 REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) | 451 ((adjusted_mode->crtc_htotal - 1) << 16), i);
371 ((adjusted_mode->crtc_vtotal - 1) << 16)); 452 REG_WRITE_WITH_AUX(map->vtotal, (mode->crtc_vdisplay - 1) |
372 REG_WRITE(map->hblank, 453 ((adjusted_mode->crtc_vtotal - 1) << 16), i);
373 (adjusted_mode->crtc_hblank_start - offsetX - 1) | 454 REG_WRITE_WITH_AUX(map->hblank,
374 ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16)); 455 (adjusted_mode->crtc_hblank_start - offsetX - 1) |
375 REG_WRITE(map->hsync, 456 ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16), i);
376 (adjusted_mode->crtc_hsync_start - offsetX - 1) | 457 REG_WRITE_WITH_AUX(map->hsync,
377 ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16)); 458 (adjusted_mode->crtc_hsync_start - offsetX - 1) |
378 REG_WRITE(map->vblank, 459 ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16), i);
379 (adjusted_mode->crtc_vblank_start - offsetY - 1) | 460 REG_WRITE_WITH_AUX(map->vblank,
380 ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16)); 461 (adjusted_mode->crtc_vblank_start - offsetY - 1) |
381 REG_WRITE(map->vsync, 462 ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16), i);
382 (adjusted_mode->crtc_vsync_start - offsetY - 1) | 463 REG_WRITE_WITH_AUX(map->vsync,
383 ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16)); 464 (adjusted_mode->crtc_vsync_start - offsetY - 1) |
465 ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16), i);
466 }
384 } else { 467 } else {
385 REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) | 468 for (i = 0; i <= need_aux; i++) {
386 ((adjusted_mode->crtc_htotal - 1) << 16)); 469 REG_WRITE_WITH_AUX(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
387 REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) | 470 ((adjusted_mode->crtc_htotal - 1) << 16), i);
388 ((adjusted_mode->crtc_vtotal - 1) << 16)); 471 REG_WRITE_WITH_AUX(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
389 REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) | 472 ((adjusted_mode->crtc_vtotal - 1) << 16), i);
390 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 473 REG_WRITE_WITH_AUX(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
391 REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) | 474 ((adjusted_mode->crtc_hblank_end - 1) << 16), i);
392 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 475 REG_WRITE_WITH_AUX(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
393 REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) | 476 ((adjusted_mode->crtc_hsync_end - 1) << 16), i);
394 ((adjusted_mode->crtc_vblank_end - 1) << 16)); 477 REG_WRITE_WITH_AUX(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
395 REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) | 478 ((adjusted_mode->crtc_vblank_end - 1) << 16), i);
396 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 479 REG_WRITE_WITH_AUX(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
480 ((adjusted_mode->crtc_vsync_end - 1) << 16), i);
481 }
397 } 482 }
398 483
399 /* Flush the plane changes */ 484 /* Flush the plane changes */
@@ -418,21 +503,30 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
418 if (is_mipi) 503 if (is_mipi)
419 goto oaktrail_crtc_mode_set_exit; 504 goto oaktrail_crtc_mode_set_exit;
420 505
421 refclk = dev_priv->core_freq * 1000;
422 506
423 dpll = 0; /*BIT16 = 0 for 100MHz reference */ 507 dpll = 0; /*BIT16 = 0 for 100MHz reference */
424 508
425 ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock); 509 refclk = is_sdvo ? 96000 : dev_priv->core_freq * 1000;
510 limit = mrst_limit(crtc, refclk);
511 ok = limit->find_pll(limit, crtc, adjusted_mode->clock,
512 refclk, &clock);
426 513
427 if (!ok) { 514 if (is_sdvo) {
428 dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n"); 515 /* Convert calculated values to register values */
429 } else { 516 clock.p1 = (1L << (clock.p1 - 1));
430 dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d," 517 clock.m -= 2;
431 "m = %x, p1 = %x.\n", clock.dot, clock.m, 518 clock.n = (1L << (clock.n - 1));
432 clock.p1);
433 } 519 }
434 520
435 fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8; 521 if (!ok)
522 DRM_ERROR("Failed to find proper PLL settings");
523
524 mrst_print_pll(&clock);
525
526 if (is_sdvo)
527 fp = clock.n << 16 | clock.m;
528 else
529 fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
436 530
437 dpll |= DPLL_VGA_MODE_DIS; 531 dpll |= DPLL_VGA_MODE_DIS;
438 532
@@ -456,38 +550,43 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
456 550
457 551
458 /* compute bitmask from p1 value */ 552 /* compute bitmask from p1 value */
459 dpll |= (1 << (clock.p1 - 2)) << 17; 553 if (is_sdvo)
554 dpll |= clock.p1 << 16; // dpll |= (1 << (clock.p1 - 1)) << 16;
555 else
556 dpll |= (1 << (clock.p1 - 2)) << 17;
460 557
461 dpll |= DPLL_VCO_ENABLE; 558 dpll |= DPLL_VCO_ENABLE;
462 559
463 mrstPrintPll("chosen", &clock);
464
465 if (dpll & DPLL_VCO_ENABLE) { 560 if (dpll & DPLL_VCO_ENABLE) {
466 REG_WRITE(map->fp0, fp); 561 for (i = 0; i <= need_aux; i++) {
467 REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE); 562 REG_WRITE_WITH_AUX(map->fp0, fp, i);
468 REG_READ(map->dpll); 563 REG_WRITE_WITH_AUX(map->dpll, dpll & ~DPLL_VCO_ENABLE, i);
469 /* Check the DPLLA lock bit PIPEACONF[29] */ 564 REG_READ_WITH_AUX(map->dpll, i);
470 udelay(150); 565 /* Check the DPLLA lock bit PIPEACONF[29] */
566 udelay(150);
567 }
471 } 568 }
472 569
473 REG_WRITE(map->fp0, fp); 570 for (i = 0; i <= need_aux; i++) {
474 REG_WRITE(map->dpll, dpll); 571 REG_WRITE_WITH_AUX(map->fp0, fp, i);
475 REG_READ(map->dpll); 572 REG_WRITE_WITH_AUX(map->dpll, dpll, i);
476 /* Wait for the clocks to stabilize. */ 573 REG_READ_WITH_AUX(map->dpll, i);
477 udelay(150); 574 /* Wait for the clocks to stabilize. */
575 udelay(150);
478 576
479 /* write it again -- the BIOS does, after all */ 577 /* write it again -- the BIOS does, after all */
480 REG_WRITE(map->dpll, dpll); 578 REG_WRITE_WITH_AUX(map->dpll, dpll, i);
481 REG_READ(map->dpll); 579 REG_READ_WITH_AUX(map->dpll, i);
482 /* Wait for the clocks to stabilize. */ 580 /* Wait for the clocks to stabilize. */
483 udelay(150); 581 udelay(150);
484 582
485 REG_WRITE(map->conf, pipeconf); 583 REG_WRITE_WITH_AUX(map->conf, pipeconf, i);
486 REG_READ(map->conf); 584 REG_READ_WITH_AUX(map->conf, i);
487 gma_wait_for_vblank(dev); 585 gma_wait_for_vblank(dev);
488 586
489 REG_WRITE(map->cntr, dspcntr); 587 REG_WRITE_WITH_AUX(map->cntr, dspcntr, i);
490 gma_wait_for_vblank(dev); 588 gma_wait_for_vblank(dev);
589 }
491 590
492oaktrail_crtc_mode_set_exit: 591oaktrail_crtc_mode_set_exit:
493 gma_power_end(dev); 592 gma_power_end(dev);
@@ -565,3 +664,9 @@ const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
565 .commit = gma_crtc_commit, 664 .commit = gma_crtc_commit,
566}; 665};
567 666
667/* Not used yet */
668const struct gma_clock_funcs mrst_clock_funcs = {
669 .clock = mrst_lvds_clock,
670 .limit = mrst_limit,
671 .pll_is_valid = gma_pll_is_valid,
672};
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 08747fd7105c..27d3875d895b 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -40,6 +40,9 @@ static int oaktrail_output_init(struct drm_device *dev)
40 dev_err(dev->dev, "DSI is not supported\n"); 40 dev_err(dev->dev, "DSI is not supported\n");
41 if (dev_priv->hdmi_priv) 41 if (dev_priv->hdmi_priv)
42 oaktrail_hdmi_init(dev, &dev_priv->mode_dev); 42 oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
43
44 psb_intel_sdvo_init(dev, SDVOB);
45
43 return 0; 46 return 0;
44} 47}
45 48
@@ -526,6 +529,7 @@ static int oaktrail_chip_setup(struct drm_device *dev)
526 psb_intel_opregion_init(dev); 529 psb_intel_opregion_init(dev);
527 psb_intel_init_bios(dev); 530 psb_intel_init_bios(dev);
528 } 531 }
532 gma_intel_setup_gmbus(dev);
529 oaktrail_hdmi_setup(dev); 533 oaktrail_hdmi_setup(dev);
530 return 0; 534 return 0;
531} 535}
@@ -534,6 +538,7 @@ static void oaktrail_teardown(struct drm_device *dev)
534{ 538{
535 struct drm_psb_private *dev_priv = dev->dev_private; 539 struct drm_psb_private *dev_priv = dev->dev_private;
536 540
541 gma_intel_teardown_gmbus(dev);
537 oaktrail_hdmi_teardown(dev); 542 oaktrail_hdmi_teardown(dev);
538 if (!dev_priv->has_gct) 543 if (!dev_priv->has_gct)
539 psb_intel_destroy_bios(dev); 544 psb_intel_destroy_bios(dev);
@@ -546,6 +551,7 @@ const struct psb_ops oaktrail_chip_ops = {
546 .crtcs = 2, 551 .crtcs = 2,
547 .hdmi_mask = (1 << 1), 552 .hdmi_mask = (1 << 1),
548 .lvds_mask = (1 << 0), 553 .lvds_mask = (1 << 0),
554 .sdvo_mask = (1 << 1),
549 .cursor_needs_phys = 0, 555 .cursor_needs_phys = 0,
550 .sgx_offset = MRST_SGX_OFFSET, 556 .sgx_offset = MRST_SGX_OFFSET,
551 557
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index e77d7214fca4..4c17c93d8d10 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -218,30 +218,6 @@ static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
218 .commit = oaktrail_lvds_commit, 218 .commit = oaktrail_lvds_commit,
219}; 219};
220 220
221static struct drm_display_mode lvds_configuration_modes[] = {
222 /* hard coded fixed mode for TPO LTPS LPJ040K001A */
223 { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
224 846, 1056, 0, 480, 489, 491, 525, 0, 0) },
225 /* hard coded fixed mode for LVDS 800x480 */
226 { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
227 802, 1024, 0, 480, 481, 482, 525, 0, 0) },
228 /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
229 { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
230 1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
231 /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
232 { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
233 1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
234 /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
235 { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
236 1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
237 /* hard coded fixed mode for LVDS 1024x768 */
238 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
239 1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
240 /* hard coded fixed mode for LVDS 1366x768 */
241 { DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
242 1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
243};
244
245/* Returns the panel fixed mode from configuration. */ 221/* Returns the panel fixed mode from configuration. */
246 222
247static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev, 223static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
@@ -303,10 +279,10 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
303 mode_dev->panel_fixed_mode = 279 mode_dev->panel_fixed_mode =
304 drm_mode_duplicate(dev, 280 drm_mode_duplicate(dev,
305 dev_priv->lfp_lvds_vbt_mode); 281 dev_priv->lfp_lvds_vbt_mode);
306 /* Then guess */ 282
283 /* If we still got no mode then bail */
307 if (mode_dev->panel_fixed_mode == NULL) 284 if (mode_dev->panel_fixed_mode == NULL)
308 mode_dev->panel_fixed_mode 285 return;
309 = drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
310 286
311 drm_mode_set_name(mode_dev->panel_fixed_mode); 287 drm_mode_set_name(mode_dev->panel_fixed_mode);
312 drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0); 288 drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 697678619bd1..23fb33f1471b 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -373,6 +373,7 @@ const struct psb_ops psb_chip_ops = {
373 .crtcs = 2, 373 .crtcs = 2,
374 .hdmi_mask = (1 << 0), 374 .hdmi_mask = (1 << 0),
375 .lvds_mask = (1 << 1), 375 .lvds_mask = (1 << 1),
376 .sdvo_mask = (1 << 0),
376 .cursor_needs_phys = 1, 377 .cursor_needs_phys = 1,
377 .sgx_offset = PSB_SGX_OFFSET, 378 .sgx_offset = PSB_SGX_OFFSET,
378 .chip_setup = psb_chip_setup, 379 .chip_setup = psb_chip_setup,
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index dd607f820a26..1199180667c9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -251,6 +251,12 @@ static int psb_driver_unload(struct drm_device *dev)
251 iounmap(dev_priv->sgx_reg); 251 iounmap(dev_priv->sgx_reg);
252 dev_priv->sgx_reg = NULL; 252 dev_priv->sgx_reg = NULL;
253 } 253 }
254 if (dev_priv->aux_reg) {
255 iounmap(dev_priv->aux_reg);
256 dev_priv->aux_reg = NULL;
257 }
258 if (dev_priv->aux_pdev)
259 pci_dev_put(dev_priv->aux_pdev);
254 260
255 /* Destroy VBT data */ 261 /* Destroy VBT data */
256 psb_intel_destroy_bios(dev); 262 psb_intel_destroy_bios(dev);
@@ -266,7 +272,7 @@ static int psb_driver_unload(struct drm_device *dev)
266static int psb_driver_load(struct drm_device *dev, unsigned long chipset) 272static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
267{ 273{
268 struct drm_psb_private *dev_priv; 274 struct drm_psb_private *dev_priv;
269 unsigned long resource_start; 275 unsigned long resource_start, resource_len;
270 unsigned long irqflags; 276 unsigned long irqflags;
271 int ret = -ENOMEM; 277 int ret = -ENOMEM;
272 struct drm_connector *connector; 278 struct drm_connector *connector;
@@ -296,6 +302,30 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
296 if (!dev_priv->sgx_reg) 302 if (!dev_priv->sgx_reg)
297 goto out_err; 303 goto out_err;
298 304
305 if (IS_MRST(dev)) {
306 dev_priv->aux_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0));
307
308 if (dev_priv->aux_pdev) {
309 resource_start = pci_resource_start(dev_priv->aux_pdev,
310 PSB_AUX_RESOURCE);
311 resource_len = pci_resource_len(dev_priv->aux_pdev,
312 PSB_AUX_RESOURCE);
313 dev_priv->aux_reg = ioremap_nocache(resource_start,
314 resource_len);
315 if (!dev_priv->aux_reg)
316 goto out_err;
317
318 DRM_DEBUG_KMS("Found aux vdc");
319 } else {
320 /* Couldn't find the aux vdc so map to primary vdc */
321 dev_priv->aux_reg = dev_priv->vdc_reg;
322 DRM_DEBUG_KMS("Couldn't find aux pci device");
323 }
324 dev_priv->gmbus_reg = dev_priv->aux_reg;
325 } else {
326 dev_priv->gmbus_reg = dev_priv->vdc_reg;
327 }
328
299 psb_intel_opregion_setup(dev); 329 psb_intel_opregion_setup(dev);
300 330
301 ret = dev_priv->ops->chip_setup(dev); 331 ret = dev_priv->ops->chip_setup(dev);
@@ -449,7 +479,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
449 obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR); 479 obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
450 if (!obj) { 480 if (!obj) {
451 dev_dbg(dev->dev, "Invalid Connector object.\n"); 481 dev_dbg(dev->dev, "Invalid Connector object.\n");
452 return -EINVAL; 482 return -ENOENT;
453 } 483 }
454 484
455 connector = obj_to_connector(obj); 485 connector = obj_to_connector(obj);
@@ -491,7 +521,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
491 obj = drm_mode_object_find(dev, obj_id, 521 obj = drm_mode_object_find(dev, obj_id,
492 DRM_MODE_OBJECT_CONNECTOR); 522 DRM_MODE_OBJECT_CONNECTOR);
493 if (!obj) { 523 if (!obj) {
494 ret = -EINVAL; 524 ret = -ENOENT;
495 goto mode_op_out; 525 goto mode_op_out;
496 } 526 }
497 527
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 0bab46bd73d2..b59e6588c343 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -45,7 +45,7 @@ enum {
45}; 45};
46 46
47#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108) 47#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
48#define IS_MRST(dev) (((dev)->pdev->device & 0xfffc) == 0x4100) 48#define IS_MRST(dev) (((dev)->pdev->device & 0xfff0) == 0x4100)
49#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130) 49#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
50#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0) 50#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
51 51
@@ -75,6 +75,7 @@ enum {
75 * PCI resource identifiers 75 * PCI resource identifiers
76 */ 76 */
77#define PSB_MMIO_RESOURCE 0 77#define PSB_MMIO_RESOURCE 0
78#define PSB_AUX_RESOURCE 0
78#define PSB_GATT_RESOURCE 2 79#define PSB_GATT_RESOURCE 2
79#define PSB_GTT_RESOURCE 3 80#define PSB_GTT_RESOURCE 3
80/* 81/*
@@ -455,6 +456,7 @@ struct psb_ops;
455 456
456struct drm_psb_private { 457struct drm_psb_private {
457 struct drm_device *dev; 458 struct drm_device *dev;
459 struct pci_dev *aux_pdev; /* Currently only used by mrst */
458 const struct psb_ops *ops; 460 const struct psb_ops *ops;
459 const struct psb_offset *regmap; 461 const struct psb_offset *regmap;
460 462
@@ -486,6 +488,7 @@ struct drm_psb_private {
486 488
487 uint8_t __iomem *sgx_reg; 489 uint8_t __iomem *sgx_reg;
488 uint8_t __iomem *vdc_reg; 490 uint8_t __iomem *vdc_reg;
491 uint8_t __iomem *aux_reg; /* Auxillary vdc pipe regs */
489 uint32_t gatt_free_offset; 492 uint32_t gatt_free_offset;
490 493
491 /* 494 /*
@@ -532,6 +535,7 @@ struct drm_psb_private {
532 535
533 /* gmbus */ 536 /* gmbus */
534 struct intel_gmbus *gmbus; 537 struct intel_gmbus *gmbus;
538 uint8_t __iomem *gmbus_reg;
535 539
536 /* Used by SDVO */ 540 /* Used by SDVO */
537 int crt_ddc_pin; 541 int crt_ddc_pin;
@@ -672,6 +676,7 @@ struct psb_ops {
672 int sgx_offset; /* Base offset of SGX device */ 676 int sgx_offset; /* Base offset of SGX device */
673 int hdmi_mask; /* Mask of HDMI CRTCs */ 677 int hdmi_mask; /* Mask of HDMI CRTCs */
674 int lvds_mask; /* Mask of LVDS CRTCs */ 678 int lvds_mask; /* Mask of LVDS CRTCs */
679 int sdvo_mask; /* Mask of SDVO CRTCs */
675 int cursor_needs_phys; /* If cursor base reg need physical address */ 680 int cursor_needs_phys; /* If cursor base reg need physical address */
676 681
677 /* Sub functions */ 682 /* Sub functions */
@@ -927,16 +932,58 @@ static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
927 return ioread32(dev_priv->vdc_reg + reg); 932 return ioread32(dev_priv->vdc_reg + reg);
928} 933}
929 934
935static inline uint32_t REGISTER_READ_AUX(struct drm_device *dev, uint32_t reg)
936{
937 struct drm_psb_private *dev_priv = dev->dev_private;
938 return ioread32(dev_priv->aux_reg + reg);
939}
940
930#define REG_READ(reg) REGISTER_READ(dev, (reg)) 941#define REG_READ(reg) REGISTER_READ(dev, (reg))
942#define REG_READ_AUX(reg) REGISTER_READ_AUX(dev, (reg))
943
944/* Useful for post reads */
945static inline uint32_t REGISTER_READ_WITH_AUX(struct drm_device *dev,
946 uint32_t reg, int aux)
947{
948 uint32_t val;
949
950 if (aux)
951 val = REG_READ_AUX(reg);
952 else
953 val = REG_READ(reg);
954
955 return val;
956}
957
958#define REG_READ_WITH_AUX(reg, aux) REGISTER_READ_WITH_AUX(dev, (reg), (aux))
931 959
932static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg, 960static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
933 uint32_t val) 961 uint32_t val)
934{ 962{
935 struct drm_psb_private *dev_priv = dev->dev_private; 963 struct drm_psb_private *dev_priv = dev->dev_private;
936 iowrite32((val), dev_priv->vdc_reg + (reg)); 964 iowrite32((val), dev_priv->vdc_reg + (reg));
937} 965}
938 966
967static inline void REGISTER_WRITE_AUX(struct drm_device *dev, uint32_t reg,
968 uint32_t val)
969{
970 struct drm_psb_private *dev_priv = dev->dev_private;
971 iowrite32((val), dev_priv->aux_reg + (reg));
972}
973
939#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val)) 974#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
975#define REG_WRITE_AUX(reg, val) REGISTER_WRITE_AUX(dev, (reg), (val))
976
977static inline void REGISTER_WRITE_WITH_AUX(struct drm_device *dev, uint32_t reg,
978 uint32_t val, int aux)
979{
980 if (aux)
981 REG_WRITE_AUX(reg, val);
982 else
983 REG_WRITE(reg, val);
984}
985
986#define REG_WRITE_WITH_AUX(reg, val, aux) REGISTER_WRITE_WITH_AUX(dev, (reg), (val), (aux))
940 987
941static inline void REGISTER_WRITE16(struct drm_device *dev, 988static inline void REGISTER_WRITE16(struct drm_device *dev,
942 uint32_t reg, uint32_t val) 989 uint32_t reg, uint32_t val)
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 97f8a03fee43..c8841ac6c8f1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -572,7 +572,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
572 572
573 if (!drmmode_obj) { 573 if (!drmmode_obj) {
574 dev_err(dev->dev, "no such CRTC id\n"); 574 dev_err(dev->dev, "no such CRTC id\n");
575 return -EINVAL; 575 return -ENOENT;
576 } 576 }
577 577
578 crtc = to_gma_crtc(obj_to_crtc(drmmode_obj)); 578 crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 6f01cdf5e125..07d3a9e6d79b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -228,24 +228,26 @@ static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u3
228{ 228{
229 struct drm_device *dev = psb_intel_sdvo->base.base.dev; 229 struct drm_device *dev = psb_intel_sdvo->base.base.dev;
230 u32 bval = val, cval = val; 230 u32 bval = val, cval = val;
231 int i; 231 int i, j;
232 int need_aux = IS_MRST(dev) ? 1 : 0;
232 233
233 if (psb_intel_sdvo->sdvo_reg == SDVOB) { 234 for (j = 0; j <= need_aux; j++) {
234 cval = REG_READ(SDVOC); 235 if (psb_intel_sdvo->sdvo_reg == SDVOB)
235 } else { 236 cval = REG_READ_WITH_AUX(SDVOC, j);
236 bval = REG_READ(SDVOB); 237 else
237 } 238 bval = REG_READ_WITH_AUX(SDVOB, j);
238 /* 239
239 * Write the registers twice for luck. Sometimes, 240 /*
240 * writing them only once doesn't appear to 'stick'. 241 * Write the registers twice for luck. Sometimes,
241 * The BIOS does this too. Yay, magic 242 * writing them only once doesn't appear to 'stick'.
242 */ 243 * The BIOS does this too. Yay, magic
243 for (i = 0; i < 2; i++) 244 */
244 { 245 for (i = 0; i < 2; i++) {
245 REG_WRITE(SDVOB, bval); 246 REG_WRITE_WITH_AUX(SDVOB, bval, j);
246 REG_READ(SDVOB); 247 REG_READ_WITH_AUX(SDVOB, j);
247 REG_WRITE(SDVOC, cval); 248 REG_WRITE_WITH_AUX(SDVOC, cval, j);
248 REG_READ(SDVOC); 249 REG_READ_WITH_AUX(SDVOC, j);
250 }
249 } 251 }
250} 252}
251 253
@@ -995,6 +997,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
995 struct psb_intel_sdvo_dtd input_dtd; 997 struct psb_intel_sdvo_dtd input_dtd;
996 int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode); 998 int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
997 int rate; 999 int rate;
1000 int need_aux = IS_MRST(dev) ? 1 : 0;
998 1001
999 if (!mode) 1002 if (!mode)
1000 return; 1003 return;
@@ -1060,7 +1063,11 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
1060 return; 1063 return;
1061 1064
1062 /* Set the SDVO control regs. */ 1065 /* Set the SDVO control regs. */
1063 sdvox = REG_READ(psb_intel_sdvo->sdvo_reg); 1066 if (need_aux)
1067 sdvox = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
1068 else
1069 sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
1070
1064 switch (psb_intel_sdvo->sdvo_reg) { 1071 switch (psb_intel_sdvo->sdvo_reg) {
1065 case SDVOB: 1072 case SDVOB:
1066 sdvox &= SDVOB_PRESERVE_MASK; 1073 sdvox &= SDVOB_PRESERVE_MASK;
@@ -1090,6 +1097,8 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1090 struct drm_device *dev = encoder->dev; 1097 struct drm_device *dev = encoder->dev;
1091 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder); 1098 struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
1092 u32 temp; 1099 u32 temp;
1100 int i;
1101 int need_aux = IS_MRST(dev) ? 1 : 0;
1093 1102
1094 switch (mode) { 1103 switch (mode) {
1095 case DRM_MODE_DPMS_ON: 1104 case DRM_MODE_DPMS_ON:
@@ -1108,19 +1117,27 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1108 psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode); 1117 psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
1109 1118
1110 if (mode == DRM_MODE_DPMS_OFF) { 1119 if (mode == DRM_MODE_DPMS_OFF) {
1111 temp = REG_READ(psb_intel_sdvo->sdvo_reg); 1120 if (need_aux)
1121 temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
1122 else
1123 temp = REG_READ(psb_intel_sdvo->sdvo_reg);
1124
1112 if ((temp & SDVO_ENABLE) != 0) { 1125 if ((temp & SDVO_ENABLE) != 0) {
1113 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE); 1126 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
1114 } 1127 }
1115 } 1128 }
1116 } else { 1129 } else {
1117 bool input1, input2; 1130 bool input1, input2;
1118 int i;
1119 u8 status; 1131 u8 status;
1120 1132
1121 temp = REG_READ(psb_intel_sdvo->sdvo_reg); 1133 if (need_aux)
1134 temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
1135 else
1136 temp = REG_READ(psb_intel_sdvo->sdvo_reg);
1137
1122 if ((temp & SDVO_ENABLE) == 0) 1138 if ((temp & SDVO_ENABLE) == 0)
1123 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE); 1139 psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
1140
1124 for (i = 0; i < 2; i++) 1141 for (i = 0; i < 2; i++)
1125 gma_wait_for_vblank(dev); 1142 gma_wait_for_vblank(dev);
1126 1143
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 60e84043aa34..400b0c4a10fb 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -17,6 +17,7 @@
17 17
18 18
19 19
20#include <linux/hdmi.h>
20#include <linux/module.h> 21#include <linux/module.h>
21 22
22#include <drm/drmP.h> 23#include <drm/drmP.h>
@@ -549,6 +550,8 @@ tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
549 buf[HB(0)] = 0x82; 550 buf[HB(0)] = 0x82;
550 buf[HB(1)] = 0x02; 551 buf[HB(1)] = 0x02;
551 buf[HB(2)] = 13; 552 buf[HB(2)] = 13;
553 buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN;
554 buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2;
552 buf[PB(4)] = drm_match_cea_mode(mode); 555 buf[PB(4)] = drm_match_cea_mode(mode);
553 556
554 tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf, 557 tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 506f8efeb519..d1491f8e0f39 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -586,7 +586,53 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
586 if (ret) 586 if (ret)
587 return ret; 587 return ret;
588 588
589 if (IS_VALLEYVIEW(dev)) { 589 if (INTEL_INFO(dev)->gen >= 8) {
590 int i;
591 seq_printf(m, "Master Interrupt Control:\t%08x\n",
592 I915_READ(GEN8_MASTER_IRQ));
593
594 for (i = 0; i < 4; i++) {
595 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
596 i, I915_READ(GEN8_GT_IMR(i)));
597 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
598 i, I915_READ(GEN8_GT_IIR(i)));
599 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
600 i, I915_READ(GEN8_GT_IER(i)));
601 }
602
603 for_each_pipe(i) {
604 seq_printf(m, "Pipe %c IMR:\t%08x\n",
605 pipe_name(i),
606 I915_READ(GEN8_DE_PIPE_IMR(i)));
607 seq_printf(m, "Pipe %c IIR:\t%08x\n",
608 pipe_name(i),
609 I915_READ(GEN8_DE_PIPE_IIR(i)));
610 seq_printf(m, "Pipe %c IER:\t%08x\n",
611 pipe_name(i),
612 I915_READ(GEN8_DE_PIPE_IER(i)));
613 }
614
615 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
616 I915_READ(GEN8_DE_PORT_IMR));
617 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
618 I915_READ(GEN8_DE_PORT_IIR));
619 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
620 I915_READ(GEN8_DE_PORT_IER));
621
622 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
623 I915_READ(GEN8_DE_MISC_IMR));
624 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
625 I915_READ(GEN8_DE_MISC_IIR));
626 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
627 I915_READ(GEN8_DE_MISC_IER));
628
629 seq_printf(m, "PCU interrupt mask:\t%08x\n",
630 I915_READ(GEN8_PCU_IMR));
631 seq_printf(m, "PCU interrupt identity:\t%08x\n",
632 I915_READ(GEN8_PCU_IIR));
633 seq_printf(m, "PCU interrupt enable:\t%08x\n",
634 I915_READ(GEN8_PCU_IER));
635 } else if (IS_VALLEYVIEW(dev)) {
590 seq_printf(m, "Display IER:\t%08x\n", 636 seq_printf(m, "Display IER:\t%08x\n",
591 I915_READ(VLV_IER)); 637 I915_READ(VLV_IER));
592 seq_printf(m, "Display IIR:\t%08x\n", 638 seq_printf(m, "Display IIR:\t%08x\n",
@@ -658,7 +704,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
658 seq_printf(m, "Interrupts received: %d\n", 704 seq_printf(m, "Interrupts received: %d\n",
659 atomic_read(&dev_priv->irq_received)); 705 atomic_read(&dev_priv->irq_received));
660 for_each_ring(ring, dev_priv, i) { 706 for_each_ring(ring, dev_priv, i) {
661 if (IS_GEN6(dev) || IS_GEN7(dev)) { 707 if (INTEL_INFO(dev)->gen >= 6) {
662 seq_printf(m, 708 seq_printf(m,
663 "Graphics Interrupt mask (%s): %08x\n", 709 "Graphics Interrupt mask (%s): %08x\n",
664 ring->name, I915_READ_IMR(ring)); 710 ring->name, I915_READ_IMR(ring));
@@ -1576,7 +1622,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
1576 I915_READ16(C0DRB3)); 1622 I915_READ16(C0DRB3));
1577 seq_printf(m, "C1DRB3 = 0x%04x\n", 1623 seq_printf(m, "C1DRB3 = 0x%04x\n",
1578 I915_READ16(C1DRB3)); 1624 I915_READ16(C1DRB3));
1579 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 1625 } else if (INTEL_INFO(dev)->gen >= 6) {
1580 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1626 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1581 I915_READ(MAD_DIMM_C0)); 1627 I915_READ(MAD_DIMM_C0));
1582 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1628 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
@@ -1585,8 +1631,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
1585 I915_READ(MAD_DIMM_C2)); 1631 I915_READ(MAD_DIMM_C2));
1586 seq_printf(m, "TILECTL = 0x%08x\n", 1632 seq_printf(m, "TILECTL = 0x%08x\n",
1587 I915_READ(TILECTL)); 1633 I915_READ(TILECTL));
1588 seq_printf(m, "ARB_MODE = 0x%08x\n", 1634 if (IS_GEN8(dev))
1589 I915_READ(ARB_MODE)); 1635 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1636 I915_READ(GAMTARBMODE));
1637 else
1638 seq_printf(m, "ARB_MODE = 0x%08x\n",
1639 I915_READ(ARB_MODE));
1590 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1640 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1591 I915_READ(DISP_ARB_CTL)); 1641 I915_READ(DISP_ARB_CTL));
1592 } 1642 }
@@ -1595,18 +1645,37 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
1595 return 0; 1645 return 0;
1596} 1646}
1597 1647
1598static int i915_ppgtt_info(struct seq_file *m, void *data) 1648static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
1599{ 1649{
1600 struct drm_info_node *node = (struct drm_info_node *) m->private;
1601 struct drm_device *dev = node->minor->dev;
1602 struct drm_i915_private *dev_priv = dev->dev_private; 1650 struct drm_i915_private *dev_priv = dev->dev_private;
1603 struct intel_ring_buffer *ring; 1651 struct intel_ring_buffer *ring;
1604 int i, ret; 1652 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1653 int unused, i;
1605 1654
1655 if (!ppgtt)
1656 return;
1657
1658 seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
1659 seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
1660 for_each_ring(ring, dev_priv, unused) {
1661 seq_printf(m, "%s\n", ring->name);
1662 for (i = 0; i < 4; i++) {
1663 u32 offset = 0x270 + i * 8;
1664 u64 pdp = I915_READ(ring->mmio_base + offset + 4);
1665 pdp <<= 32;
1666 pdp |= I915_READ(ring->mmio_base + offset);
1667 for (i = 0; i < 4; i++)
1668 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
1669 }
1670 }
1671}
1672
1673static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
1674{
1675 struct drm_i915_private *dev_priv = dev->dev_private;
1676 struct intel_ring_buffer *ring;
1677 int i;
1606 1678
1607 ret = mutex_lock_interruptible(&dev->struct_mutex);
1608 if (ret)
1609 return ret;
1610 if (INTEL_INFO(dev)->gen == 6) 1679 if (INTEL_INFO(dev)->gen == 6)
1611 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 1680 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
1612 1681
@@ -1625,6 +1694,22 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
1625 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 1694 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
1626 } 1695 }
1627 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 1696 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
1697}
1698
1699static int i915_ppgtt_info(struct seq_file *m, void *data)
1700{
1701 struct drm_info_node *node = (struct drm_info_node *) m->private;
1702 struct drm_device *dev = node->minor->dev;
1703
1704 int ret = mutex_lock_interruptible(&dev->struct_mutex);
1705 if (ret)
1706 return ret;
1707
1708 if (INTEL_INFO(dev)->gen >= 8)
1709 gen8_ppgtt_info(m, dev);
1710 else if (INTEL_INFO(dev)->gen >= 6)
1711 gen6_ppgtt_info(m, dev);
1712
1628 mutex_unlock(&dev->struct_mutex); 1713 mutex_unlock(&dev->struct_mutex);
1629 1714
1630 return 0; 1715 return 0;
@@ -2955,7 +3040,7 @@ static int i915_debugfs_create(struct dentry *root,
2955 return drm_add_fake_info_node(minor, ent, fops); 3040 return drm_add_fake_info_node(minor, ent, fops);
2956} 3041}
2957 3042
2958static struct drm_info_list i915_debugfs_list[] = { 3043static const struct drm_info_list i915_debugfs_list[] = {
2959 {"i915_capabilities", i915_capabilities, 0}, 3044 {"i915_capabilities", i915_capabilities, 0},
2960 {"i915_gem_objects", i915_gem_object_info, 0}, 3045 {"i915_gem_objects", i915_gem_object_info, 0},
2961 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 3046 {"i915_gem_gtt", i915_gem_gtt_info, 0},
@@ -2997,7 +3082,7 @@ static struct drm_info_list i915_debugfs_list[] = {
2997}; 3082};
2998#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 3083#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2999 3084
3000static struct i915_debugfs_files { 3085static const struct i915_debugfs_files {
3001 const char *name; 3086 const char *name;
3002 const struct file_operations *fops; 3087 const struct file_operations *fops;
3003} i915_debugfs_files[] = { 3088} i915_debugfs_files[] = {
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 00d74f816a72..25acbb5eca6e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1486,7 +1486,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1486 1486
1487 spin_lock_init(&dev_priv->irq_lock); 1487 spin_lock_init(&dev_priv->irq_lock);
1488 spin_lock_init(&dev_priv->gpu_error.lock); 1488 spin_lock_init(&dev_priv->gpu_error.lock);
1489 spin_lock_init(&dev_priv->backlight.lock); 1489 spin_lock_init(&dev_priv->backlight_lock);
1490 spin_lock_init(&dev_priv->uncore.lock); 1490 spin_lock_init(&dev_priv->uncore.lock);
1491 spin_lock_init(&dev_priv->mm.object_stat_lock); 1491 spin_lock_init(&dev_priv->mm.object_stat_lock);
1492 mutex_init(&dev_priv->dpio_lock); 1492 mutex_init(&dev_priv->dpio_lock);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 92ad319164d7..c2e00ed23195 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -335,6 +335,24 @@ static const struct intel_device_info intel_haswell_m_info = {
335 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 335 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
336}; 336};
337 337
338static const struct intel_device_info intel_broadwell_d_info = {
339 .is_preliminary = 1,
340 .gen = 8, .num_pipes = 3,
341 .need_gfx_hws = 1, .has_hotplug = 1,
342 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
343 .has_llc = 1,
344 .has_ddi = 1,
345};
346
347static const struct intel_device_info intel_broadwell_m_info = {
348 .is_preliminary = 1,
349 .gen = 8, .is_mobile = 1, .num_pipes = 3,
350 .need_gfx_hws = 1, .has_hotplug = 1,
351 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
352 .has_llc = 1,
353 .has_ddi = 1,
354};
355
338/* 356/*
339 * Make sure any device matches here are from most specific to most 357 * Make sure any device matches here are from most specific to most
340 * general. For example, since the Quanta match is based on the subsystem 358 * general. For example, since the Quanta match is based on the subsystem
@@ -366,7 +384,9 @@ static const struct intel_device_info intel_haswell_m_info = {
366 INTEL_HSW_D_IDS(&intel_haswell_d_info), \ 384 INTEL_HSW_D_IDS(&intel_haswell_d_info), \
367 INTEL_HSW_M_IDS(&intel_haswell_m_info), \ 385 INTEL_HSW_M_IDS(&intel_haswell_m_info), \
368 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \ 386 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
369 INTEL_VLV_D_IDS(&intel_valleyview_d_info) 387 INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
388 INTEL_BDW_M_IDS(&intel_broadwell_m_info), \
389 INTEL_BDW_D_IDS(&intel_broadwell_d_info)
370 390
371static const struct pci_device_id pciidlist[] = { /* aka */ 391static const struct pci_device_id pciidlist[] = { /* aka */
372 INTEL_PCI_IDS, 392 INTEL_PCI_IDS,
@@ -427,6 +447,12 @@ void intel_detect_pch(struct drm_device *dev)
427 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 447 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
428 WARN_ON(!IS_HASWELL(dev)); 448 WARN_ON(!IS_HASWELL(dev));
429 WARN_ON(IS_ULT(dev)); 449 WARN_ON(IS_ULT(dev));
450 } else if (IS_BROADWELL(dev)) {
451 dev_priv->pch_type = PCH_LPT;
452 dev_priv->pch_id =
453 INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
454 DRM_DEBUG_KMS("This is Broadwell, assuming "
455 "LynxPoint LP PCH\n");
430 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 456 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
431 dev_priv->pch_type = PCH_LPT; 457 dev_priv->pch_type = PCH_LPT;
432 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 458 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
@@ -451,6 +477,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
451 if (INTEL_INFO(dev)->gen < 6) 477 if (INTEL_INFO(dev)->gen < 6)
452 return 0; 478 return 0;
453 479
480 /* Until we get further testing... */
481 if (IS_GEN8(dev)) {
482 WARN_ON(!i915_preliminary_hw_support);
483 return 0;
484 }
485
454 if (i915_semaphores >= 0) 486 if (i915_semaphores >= 0)
455 return i915_semaphores; 487 return i915_semaphores;
456 488
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6d2a9a1c8379..6b96e91c6f1a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -54,6 +54,7 @@
54#define DRIVER_DATE "20080730" 54#define DRIVER_DATE "20080730"
55 55
56enum pipe { 56enum pipe {
57 INVALID_PIPE = -1,
57 PIPE_A = 0, 58 PIPE_A = 0,
58 PIPE_B, 59 PIPE_B,
59 PIPE_C, 60 PIPE_C,
@@ -129,6 +130,10 @@ enum intel_display_power_domain {
129#define HSW_ALWAYS_ON_POWER_DOMAINS ( \ 130#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
130 BIT(POWER_DOMAIN_PIPE_A) | \ 131 BIT(POWER_DOMAIN_PIPE_A) | \
131 BIT(POWER_DOMAIN_TRANSCODER_EDP)) 132 BIT(POWER_DOMAIN_TRANSCODER_EDP))
133#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
134 BIT(POWER_DOMAIN_PIPE_A) | \
135 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
136 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
132 137
133enum hpd_pin { 138enum hpd_pin {
134 HPD_NONE = 0, 139 HPD_NONE = 0,
@@ -254,6 +259,7 @@ struct intel_opregion {
254 struct opregion_asle __iomem *asle; 259 struct opregion_asle __iomem *asle;
255 void __iomem *vbt; 260 void __iomem *vbt;
256 u32 __iomem *lid_state; 261 u32 __iomem *lid_state;
262 struct work_struct asle_work;
257}; 263};
258#define OPREGION_SIZE (8*1024) 264#define OPREGION_SIZE (8*1024)
259 265
@@ -357,6 +363,7 @@ struct drm_i915_error_state {
357 enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS]; 363 enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
358}; 364};
359 365
366struct intel_connector;
360struct intel_crtc_config; 367struct intel_crtc_config;
361struct intel_crtc; 368struct intel_crtc;
362struct intel_limit; 369struct intel_limit;
@@ -419,6 +426,13 @@ struct drm_i915_display_funcs {
419 /* render clock increase/decrease */ 426 /* render clock increase/decrease */
420 /* display clock increase/decrease */ 427 /* display clock increase/decrease */
421 /* pll clock increase/decrease */ 428 /* pll clock increase/decrease */
429
430 int (*setup_backlight)(struct intel_connector *connector);
431 uint32_t (*get_backlight)(struct intel_connector *connector);
432 void (*set_backlight)(struct intel_connector *connector,
433 uint32_t level);
434 void (*disable_backlight)(struct intel_connector *connector);
435 void (*enable_backlight)(struct intel_connector *connector);
422}; 436};
423 437
424struct intel_uncore_funcs { 438struct intel_uncore_funcs {
@@ -585,10 +599,21 @@ struct i915_gtt {
585struct i915_hw_ppgtt { 599struct i915_hw_ppgtt {
586 struct i915_address_space base; 600 struct i915_address_space base;
587 unsigned num_pd_entries; 601 unsigned num_pd_entries;
588 struct page **pt_pages; 602 union {
589 uint32_t pd_offset; 603 struct page **pt_pages;
590 dma_addr_t *pt_dma_addr; 604 struct page *gen8_pt_pages;
591 605 };
606 struct page *pd_pages;
607 int num_pd_pages;
608 int num_pt_pages;
609 union {
610 uint32_t pd_offset;
611 dma_addr_t pd_dma_addr[4];
612 };
613 union {
614 dma_addr_t *pt_dma_addr;
615 dma_addr_t *gen8_pt_dma_addr[4];
616 };
592 int (*enable)(struct drm_device *dev); 617 int (*enable)(struct drm_device *dev);
593}; 618};
594 619
@@ -703,7 +728,6 @@ enum intel_sbi_destination {
703#define QUIRK_PIPEA_FORCE (1<<0) 728#define QUIRK_PIPEA_FORCE (1<<0)
704#define QUIRK_LVDS_SSC_DISABLE (1<<1) 729#define QUIRK_LVDS_SSC_DISABLE (1<<1)
705#define QUIRK_INVERT_BRIGHTNESS (1<<2) 730#define QUIRK_INVERT_BRIGHTNESS (1<<2)
706#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
707 731
708struct intel_fbdev; 732struct intel_fbdev;
709struct intel_fbc_work; 733struct intel_fbc_work;
@@ -755,6 +779,7 @@ struct i915_suspend_saved_registers {
755 u32 saveBLC_HIST_CTL; 779 u32 saveBLC_HIST_CTL;
756 u32 saveBLC_PWM_CTL; 780 u32 saveBLC_PWM_CTL;
757 u32 saveBLC_PWM_CTL2; 781 u32 saveBLC_PWM_CTL2;
782 u32 saveBLC_HIST_CTL_B;
758 u32 saveBLC_CPU_PWM_CTL; 783 u32 saveBLC_CPU_PWM_CTL;
759 u32 saveBLC_CPU_PWM_CTL2; 784 u32 saveBLC_CPU_PWM_CTL2;
760 u32 saveFPB0; 785 u32 saveFPB0;
@@ -1325,7 +1350,10 @@ typedef struct drm_i915_private {
1325 struct mutex dpio_lock; 1350 struct mutex dpio_lock;
1326 1351
1327 /** Cached value of IMR to avoid reads in updating the bitfield */ 1352 /** Cached value of IMR to avoid reads in updating the bitfield */
1328 u32 irq_mask; 1353 union {
1354 u32 irq_mask;
1355 u32 de_irq_mask[I915_MAX_PIPES];
1356 };
1329 u32 gt_irq_mask; 1357 u32 gt_irq_mask;
1330 u32 pm_irq_mask; 1358 u32 pm_irq_mask;
1331 1359
@@ -1353,13 +1381,8 @@ typedef struct drm_i915_private {
1353 struct intel_overlay *overlay; 1381 struct intel_overlay *overlay;
1354 unsigned int sprite_scaling_enabled; 1382 unsigned int sprite_scaling_enabled;
1355 1383
1356 /* backlight */ 1384 /* backlight registers and fields in struct intel_panel */
1357 struct { 1385 spinlock_t backlight_lock;
1358 int level;
1359 bool enabled;
1360 spinlock_t lock; /* bl registers and the above bl fields */
1361 struct backlight_device *device;
1362 } backlight;
1363 1386
1364 /* LVDS info */ 1387 /* LVDS info */
1365 bool no_aux_handshake; 1388 bool no_aux_handshake;
@@ -1736,11 +1759,17 @@ struct drm_i915_file_private {
1736 (dev)->pdev->device == 0x010A) 1759 (dev)->pdev->device == 0x010A)
1737#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 1760#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
1738#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 1761#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
1762#define IS_BROADWELL(dev) (INTEL_INFO(dev)->gen == 8)
1739#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1763#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1740#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 1764#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
1741 ((dev)->pdev->device & 0xFF00) == 0x0C00) 1765 ((dev)->pdev->device & 0xFF00) == 0x0C00)
1742#define IS_ULT(dev) (IS_HASWELL(dev) && \ 1766#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
1767 (((dev)->pdev->device & 0xf) == 0x2 || \
1768 ((dev)->pdev->device & 0xf) == 0x6 || \
1769 ((dev)->pdev->device & 0xf) == 0xe))
1770#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
1743 ((dev)->pdev->device & 0xFF00) == 0x0A00) 1771 ((dev)->pdev->device & 0xFF00) == 0x0A00)
1772#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
1744#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 1773#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
1745 ((dev)->pdev->device & 0x00F0) == 0x0020) 1774 ((dev)->pdev->device & 0x00F0) == 0x0020)
1746#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 1775#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -1757,6 +1786,7 @@ struct drm_i915_file_private {
1757#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 1786#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
1758#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 1787#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
1759#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 1788#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
1789#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
1760 1790
1761#define RENDER_RING (1<<RCS) 1791#define RENDER_RING (1<<RCS)
1762#define BSD_RING (1<<VCS) 1792#define BSD_RING (1<<VCS)
@@ -1793,12 +1823,12 @@ struct drm_i915_file_private {
1793#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 1823#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1794#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1824#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1795 1825
1796#define HAS_IPS(dev) (IS_ULT(dev)) 1826#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
1797 1827
1798#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 1828#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
1799#define HAS_POWER_WELL(dev) (IS_HASWELL(dev)) 1829#define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
1800#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 1830#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
1801#define HAS_PSR(dev) (IS_HASWELL(dev)) 1831#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
1802 1832
1803#define INTEL_PCH_DEVICE_ID_MASK 0xff00 1833#define INTEL_PCH_DEVICE_ID_MASK 0xff00
1804#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 1834#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bc528201caca..40d9dcf858bf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2954,6 +2954,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
2954 obj->stride, obj->tiling_mode); 2954 obj->stride, obj->tiling_mode);
2955 2955
2956 switch (INTEL_INFO(dev)->gen) { 2956 switch (INTEL_INFO(dev)->gen) {
2957 case 8:
2957 case 7: 2958 case 7:
2958 case 6: 2959 case 6:
2959 case 5: 2960 case 5:
@@ -4361,6 +4362,8 @@ void i915_gem_init_swizzling(struct drm_device *dev)
4361 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); 4362 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4362 else if (IS_GEN7(dev)) 4363 else if (IS_GEN7(dev))
4363 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 4364 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4365 else if (IS_GEN8(dev))
4366 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4364 else 4367 else
4365 BUG(); 4368 BUG();
4366} 4369}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 4625670bcadb..2ec122a63406 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -117,6 +117,9 @@ static int get_context_size(struct drm_device *dev)
117 else 117 else
118 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; 118 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
119 break; 119 break;
120 case 8:
121 ret = GEN8_CXT_TOTAL_SIZE;
122 break;
120 default: 123 default:
121 BUG(); 124 BUG();
122 } 125 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 0ce0d47e4b0f..885d595e0e02 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -212,6 +212,7 @@ static int
212relocate_entry_cpu(struct drm_i915_gem_object *obj, 212relocate_entry_cpu(struct drm_i915_gem_object *obj,
213 struct drm_i915_gem_relocation_entry *reloc) 213 struct drm_i915_gem_relocation_entry *reloc)
214{ 214{
215 struct drm_device *dev = obj->base.dev;
215 uint32_t page_offset = offset_in_page(reloc->offset); 216 uint32_t page_offset = offset_in_page(reloc->offset);
216 char *vaddr; 217 char *vaddr;
217 int ret = -EINVAL; 218 int ret = -EINVAL;
@@ -223,6 +224,19 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
223 vaddr = kmap_atomic(i915_gem_object_get_page(obj, 224 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
224 reloc->offset >> PAGE_SHIFT)); 225 reloc->offset >> PAGE_SHIFT));
225 *(uint32_t *)(vaddr + page_offset) = reloc->delta; 226 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
227
228 if (INTEL_INFO(dev)->gen >= 8) {
229 page_offset = offset_in_page(page_offset + sizeof(uint32_t));
230
231 if (page_offset == 0) {
232 kunmap_atomic(vaddr);
233 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
234 (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
235 }
236
237 *(uint32_t *)(vaddr + page_offset) = 0;
238 }
239
226 kunmap_atomic(vaddr); 240 kunmap_atomic(vaddr);
227 241
228 return 0; 242 return 0;
@@ -253,6 +267,21 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
253 reloc_entry = (uint32_t __iomem *) 267 reloc_entry = (uint32_t __iomem *)
254 (reloc_page + offset_in_page(reloc->offset)); 268 (reloc_page + offset_in_page(reloc->offset));
255 iowrite32(reloc->delta, reloc_entry); 269 iowrite32(reloc->delta, reloc_entry);
270
271 if (INTEL_INFO(dev)->gen >= 8) {
272 reloc_entry += 1;
273
274 if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
275 io_mapping_unmap_atomic(reloc_page);
276 reloc_page = io_mapping_map_atomic_wc(
277 dev_priv->gtt.mappable,
278 reloc->offset + sizeof(uint32_t));
279 reloc_entry = reloc_page;
280 }
281
282 iowrite32(0, reloc_entry);
283 }
284
256 io_mapping_unmap_atomic(reloc_page); 285 io_mapping_unmap_atomic(reloc_page);
257 286
258 return 0; 287 return 0;
@@ -323,7 +352,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
323 return 0; 352 return 0;
324 353
325 /* Check that the relocation address is valid... */ 354 /* Check that the relocation address is valid... */
326 if (unlikely(reloc->offset > obj->base.size - 4)) { 355 if (unlikely(reloc->offset >
356 obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
327 DRM_DEBUG("Relocation beyond object bounds: " 357 DRM_DEBUG("Relocation beyond object bounds: "
328 "obj %p target %d offset %d size %d.\n", 358 "obj %p target %d offset %d size %d.\n",
329 obj, reloc->target_handle, 359 obj, reloc->target_handle,
@@ -1116,8 +1146,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1116 1146
1117 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure 1147 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1118 * batch" bit. Hence we need to pin secure batches into the global gtt. 1148 * batch" bit. Hence we need to pin secure batches into the global gtt.
1119 * hsw should have this fixed, but let's be paranoid and do it 1149 * hsw should have this fixed, but bdw mucks it up again. */
1120 * unconditionally for now. */
1121 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) 1150 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
1122 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); 1151 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
1123 1152
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2b4c530d74a3..efb5dab61c81 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,6 +30,8 @@
30 30
31#define GEN6_PPGTT_PD_ENTRIES 512 31#define GEN6_PPGTT_PD_ENTRIES 512
32#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t)) 32#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
33typedef uint64_t gen8_gtt_pte_t;
34typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
33 35
34/* PPGTT stuff */ 36/* PPGTT stuff */
35#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) 37#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
@@ -57,6 +59,41 @@
57#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) 59#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
58#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) 60#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
59 61
62#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
63#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
64#define GEN8_LEGACY_PDPS 4
65
66#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
67#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
68#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
69#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
70
71static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
72 enum i915_cache_level level,
73 bool valid)
74{
75 gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
76 pte |= addr;
77 if (level != I915_CACHE_NONE)
78 pte |= PPAT_CACHED_INDEX;
79 else
80 pte |= PPAT_UNCACHED_INDEX;
81 return pte;
82}
83
84static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
85 dma_addr_t addr,
86 enum i915_cache_level level)
87{
88 gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
89 pde |= addr;
90 if (level != I915_CACHE_NONE)
91 pde |= PPAT_CACHED_PDE_INDEX;
92 else
93 pde |= PPAT_UNCACHED_INDEX;
94 return pde;
95}
96
60static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, 97static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
61 enum i915_cache_level level, 98 enum i915_cache_level level,
62 bool valid) 99 bool valid)
@@ -158,6 +195,257 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
158 return pte; 195 return pte;
159} 196}
160 197
198/* Broadwell Page Directory Pointer Descriptors */
199static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
200 uint64_t val)
201{
202 int ret;
203
204 BUG_ON(entry >= 4);
205
206 ret = intel_ring_begin(ring, 6);
207 if (ret)
208 return ret;
209
210 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
211 intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
212 intel_ring_emit(ring, (u32)(val >> 32));
213 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
214 intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
215 intel_ring_emit(ring, (u32)(val));
216 intel_ring_advance(ring);
217
218 return 0;
219}
220
221static int gen8_ppgtt_enable(struct drm_device *dev)
222{
223 struct drm_i915_private *dev_priv = dev->dev_private;
224 struct intel_ring_buffer *ring;
225 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
226 int i, j, ret;
227
228 /* bit of a hack to find the actual last used pd */
229 int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
230
231 for_each_ring(ring, dev_priv, j) {
232 I915_WRITE(RING_MODE_GEN7(ring),
233 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
234 }
235
236 for (i = used_pd - 1; i >= 0; i--) {
237 dma_addr_t addr = ppgtt->pd_dma_addr[i];
238 for_each_ring(ring, dev_priv, j) {
239 ret = gen8_write_pdp(ring, i, addr);
240 if (ret)
241 return ret;
242 }
243 }
244 return 0;
245}
246
247static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
248 unsigned first_entry,
249 unsigned num_entries,
250 bool use_scratch)
251{
252 struct i915_hw_ppgtt *ppgtt =
253 container_of(vm, struct i915_hw_ppgtt, base);
254 gen8_gtt_pte_t *pt_vaddr, scratch_pte;
255 unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
256 unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
257 unsigned last_pte, i;
258
259 scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
260 I915_CACHE_LLC, use_scratch);
261
262 while (num_entries) {
263 struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
264
265 last_pte = first_pte + num_entries;
266 if (last_pte > GEN8_PTES_PER_PAGE)
267 last_pte = GEN8_PTES_PER_PAGE;
268
269 pt_vaddr = kmap_atomic(page_table);
270
271 for (i = first_pte; i < last_pte; i++)
272 pt_vaddr[i] = scratch_pte;
273
274 kunmap_atomic(pt_vaddr);
275
276 num_entries -= last_pte - first_pte;
277 first_pte = 0;
278 act_pt++;
279 }
280}
281
282static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
283 struct sg_table *pages,
284 unsigned first_entry,
285 enum i915_cache_level cache_level)
286{
287 struct i915_hw_ppgtt *ppgtt =
288 container_of(vm, struct i915_hw_ppgtt, base);
289 gen8_gtt_pte_t *pt_vaddr;
290 unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
291 unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
292 struct sg_page_iter sg_iter;
293
294 pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
295 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
296 dma_addr_t page_addr;
297
298 page_addr = sg_dma_address(sg_iter.sg) +
299 (sg_iter.sg_pgoffset << PAGE_SHIFT);
300 pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level,
301 true);
302 if (++act_pte == GEN8_PTES_PER_PAGE) {
303 kunmap_atomic(pt_vaddr);
304 act_pt++;
305 pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
306 act_pte = 0;
307
308 }
309 }
310 kunmap_atomic(pt_vaddr);
311}
312
313static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
314{
315 struct i915_hw_ppgtt *ppgtt =
316 container_of(vm, struct i915_hw_ppgtt, base);
317 int i, j;
318
319 for (i = 0; i < ppgtt->num_pd_pages ; i++) {
320 if (ppgtt->pd_dma_addr[i]) {
321 pci_unmap_page(ppgtt->base.dev->pdev,
322 ppgtt->pd_dma_addr[i],
323 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
324
325 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
326 dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
327 if (addr)
328 pci_unmap_page(ppgtt->base.dev->pdev,
329 addr,
330 PAGE_SIZE,
331 PCI_DMA_BIDIRECTIONAL);
332
333 }
334 }
335 kfree(ppgtt->gen8_pt_dma_addr[i]);
336 }
337
338 __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
339 __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
340}
341
342/**
343 * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
344 * net effect resembling a 2-level page table in normal x86 terms. Each PDP
345 * represents 1GB of memory
346 * 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
347 *
348 * TODO: Do something with the size parameter
349 **/
350static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
351{
352 struct page *pt_pages;
353 int i, j, ret = -ENOMEM;
354 const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
355 const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
356
357 if (size % (1<<30))
358 DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
359
360 /* FIXME: split allocation into smaller pieces. For now we only ever do
361 * this once, but with full PPGTT, the multiple contiguous allocations
362 * will be bad.
363 */
364 ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
365 if (!ppgtt->pd_pages)
366 return -ENOMEM;
367
368 pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
369 if (!pt_pages) {
370 __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
371 return -ENOMEM;
372 }
373
374 ppgtt->gen8_pt_pages = pt_pages;
375 ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
376 ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
377 ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
378 ppgtt->enable = gen8_ppgtt_enable;
379 ppgtt->base.clear_range = gen8_ppgtt_clear_range;
380 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
381 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
382
383 BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
384
385 /*
386 * - Create a mapping for the page directories.
387 * - For each page directory:
388 * allocate space for page table mappings.
389 * map each page table
390 */
391 for (i = 0; i < max_pdp; i++) {
392 dma_addr_t temp;
393 temp = pci_map_page(ppgtt->base.dev->pdev,
394 &ppgtt->pd_pages[i], 0,
395 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
396 if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
397 goto err_out;
398
399 ppgtt->pd_dma_addr[i] = temp;
400
401 ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
402 if (!ppgtt->gen8_pt_dma_addr[i])
403 goto err_out;
404
405 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
406 struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
407 temp = pci_map_page(ppgtt->base.dev->pdev,
408 p, 0, PAGE_SIZE,
409 PCI_DMA_BIDIRECTIONAL);
410
411 if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
412 goto err_out;
413
414 ppgtt->gen8_pt_dma_addr[i][j] = temp;
415 }
416 }
417
418 /* For now, the PPGTT helper functions all require that the PDEs are
419 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
420 * will never need to touch the PDEs again */
421 for (i = 0; i < max_pdp; i++) {
422 gen8_ppgtt_pde_t *pd_vaddr;
423 pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
424 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
425 dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
426 pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
427 I915_CACHE_LLC);
428 }
429 kunmap_atomic(pd_vaddr);
430 }
431
432 ppgtt->base.clear_range(&ppgtt->base, 0,
433 ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
434 true);
435
436 DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
437 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
438 DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
439 ppgtt->num_pt_pages,
440 (ppgtt->num_pt_pages - num_pt_pages) +
441 size % (1<<30));
442 return 0;
443
444err_out:
445 ppgtt->base.cleanup(&ppgtt->base);
446 return ret;
447}
448
161static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) 449static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
162{ 450{
163 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; 451 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
@@ -410,6 +698,8 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
410 698
411 if (INTEL_INFO(dev)->gen < 8) 699 if (INTEL_INFO(dev)->gen < 8)
412 ret = gen6_ppgtt_init(ppgtt); 700 ret = gen6_ppgtt_init(ppgtt);
701 else if (IS_GEN8(dev))
702 ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
413 else 703 else
414 BUG(); 704 BUG();
415 705
@@ -573,6 +863,57 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
573 return 0; 863 return 0;
574} 864}
575 865
866static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
867{
868#ifdef writeq
869 writeq(pte, addr);
870#else
871 iowrite32((u32)pte, addr);
872 iowrite32(pte >> 32, addr + 4);
873#endif
874}
875
876static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
877 struct sg_table *st,
878 unsigned int first_entry,
879 enum i915_cache_level level)
880{
881 struct drm_i915_private *dev_priv = vm->dev->dev_private;
882 gen8_gtt_pte_t __iomem *gtt_entries =
883 (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
884 int i = 0;
885 struct sg_page_iter sg_iter;
886 dma_addr_t addr;
887
888 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
889 addr = sg_dma_address(sg_iter.sg) +
890 (sg_iter.sg_pgoffset << PAGE_SHIFT);
891 gen8_set_pte(&gtt_entries[i],
892 gen8_pte_encode(addr, level, true));
893 i++;
894 }
895
896 /*
897 * XXX: This serves as a posting read to make sure that the PTE has
898 * actually been updated. There is some concern that even though
899 * registers and PTEs are within the same BAR that they are potentially
900 * of NUMA access patterns. Therefore, even with the way we assume
901 * hardware should work, we must keep this posting read for paranoia.
902 */
903 if (i != 0)
904 WARN_ON(readq(&gtt_entries[i-1])
905 != gen8_pte_encode(addr, level, true));
906
907#if 0 /* TODO: Still needed on GEN8? */
908 /* This next bit makes the above posting read even more important. We
909 * want to flush the TLBs only after we're certain all the PTE updates
910 * have finished.
911 */
912 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
913 POSTING_READ(GFX_FLSH_CNTL_GEN6);
914#endif
915}
916
576/* 917/*
577 * Binds an object into the global gtt with the specified cache level. The object 918 * Binds an object into the global gtt with the specified cache level. The object
578 * will be accessible to the GPU via commands whose operands reference offsets 919 * will be accessible to the GPU via commands whose operands reference offsets
@@ -615,6 +956,30 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
615 POSTING_READ(GFX_FLSH_CNTL_GEN6); 956 POSTING_READ(GFX_FLSH_CNTL_GEN6);
616} 957}
617 958
959static void gen8_ggtt_clear_range(struct i915_address_space *vm,
960 unsigned int first_entry,
961 unsigned int num_entries,
962 bool use_scratch)
963{
964 struct drm_i915_private *dev_priv = vm->dev->dev_private;
965 gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
966 (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
967 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
968 int i;
969
970 if (WARN(num_entries > max_entries,
971 "First entry = %d; Num entries = %d (max=%d)\n",
972 first_entry, num_entries, max_entries))
973 num_entries = max_entries;
974
975 scratch_pte = gen8_pte_encode(vm->scratch.addr,
976 I915_CACHE_LLC,
977 use_scratch);
978 for (i = 0; i < num_entries; i++)
979 gen8_set_pte(&gtt_base[i], scratch_pte);
980 readl(gtt_base);
981}
982
618static void gen6_ggtt_clear_range(struct i915_address_space *vm, 983static void gen6_ggtt_clear_range(struct i915_address_space *vm,
619 unsigned int first_entry, 984 unsigned int first_entry,
620 unsigned int num_entries, 985 unsigned int num_entries,
@@ -638,7 +1003,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
638 readl(gtt_base); 1003 readl(gtt_base);
639} 1004}
640 1005
641
642static void i915_ggtt_insert_entries(struct i915_address_space *vm, 1006static void i915_ggtt_insert_entries(struct i915_address_space *vm,
643 struct sg_table *st, 1007 struct sg_table *st,
644 unsigned int pg_start, 1008 unsigned int pg_start,
@@ -720,6 +1084,7 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
720 *end -= 4096; 1084 *end -= 4096;
721 } 1085 }
722} 1086}
1087
723void i915_gem_setup_global_gtt(struct drm_device *dev, 1088void i915_gem_setup_global_gtt(struct drm_device *dev,
724 unsigned long start, 1089 unsigned long start,
725 unsigned long mappable_end, 1090 unsigned long mappable_end,
@@ -816,7 +1181,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
816 1181
817 DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); 1182 DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
818 drm_mm_takedown(&dev_priv->gtt.base.mm); 1183 drm_mm_takedown(&dev_priv->gtt.base.mm);
819 gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; 1184 if (INTEL_INFO(dev)->gen < 8)
1185 gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
820 } 1186 }
821 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 1187 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
822} 1188}
@@ -866,6 +1232,20 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
866 return snb_gmch_ctl << 20; 1232 return snb_gmch_ctl << 20;
867} 1233}
868 1234
1235static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
1236{
1237 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
1238 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
1239 if (bdw_gmch_ctl)
1240 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
1241 if (bdw_gmch_ctl > 4) {
1242 WARN_ON(!i915_preliminary_hw_support);
1243 return 4<<20;
1244 }
1245
1246 return bdw_gmch_ctl << 20;
1247}
1248
869static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) 1249static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
870{ 1250{
871 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; 1251 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
@@ -873,6 +1253,108 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
873 return snb_gmch_ctl << 25; /* 32 MB units */ 1253 return snb_gmch_ctl << 25; /* 32 MB units */
874} 1254}
875 1255
1256static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
1257{
1258 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
1259 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
1260 return bdw_gmch_ctl << 25; /* 32 MB units */
1261}
1262
1263static int ggtt_probe_common(struct drm_device *dev,
1264 size_t gtt_size)
1265{
1266 struct drm_i915_private *dev_priv = dev->dev_private;
1267 phys_addr_t gtt_bus_addr;
1268 int ret;
1269
1270 /* For Modern GENs the PTEs and register space are split in the BAR */
1271 gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
1272 (pci_resource_len(dev->pdev, 0) / 2);
1273
1274 dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
1275 if (!dev_priv->gtt.gsm) {
1276 DRM_ERROR("Failed to map the gtt page table\n");
1277 return -ENOMEM;
1278 }
1279
1280 ret = setup_scratch_page(dev);
1281 if (ret) {
1282 DRM_ERROR("Scratch setup failed\n");
1283 /* iounmap will also get called at remove, but meh */
1284 iounmap(dev_priv->gtt.gsm);
1285 }
1286
1287 return ret;
1288}
1289
1290/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
1291 * bits. When using advanced contexts each context stores its own PAT, but
1292 * writing this data shouldn't be harmful even in those cases. */
1293static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
1294{
1295#define GEN8_PPAT_UC (0<<0)
1296#define GEN8_PPAT_WC (1<<0)
1297#define GEN8_PPAT_WT (2<<0)
1298#define GEN8_PPAT_WB (3<<0)
1299#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
1300/* FIXME(BDW): Bspec is completely confused about cache control bits. */
1301#define GEN8_PPAT_LLC (1<<2)
1302#define GEN8_PPAT_LLCELLC (2<<2)
1303#define GEN8_PPAT_LLCeLLC (3<<2)
1304#define GEN8_PPAT_AGE(x) (x<<4)
1305#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
1306 uint64_t pat;
1307
1308 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
1309 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
1310 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
1311 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
1312 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
1313 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
1314 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
1315 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
1316
1317 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
1318 * write would work. */
1319 I915_WRITE(GEN8_PRIVATE_PAT, pat);
1320 I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
1321}
1322
1323static int gen8_gmch_probe(struct drm_device *dev,
1324 size_t *gtt_total,
1325 size_t *stolen,
1326 phys_addr_t *mappable_base,
1327 unsigned long *mappable_end)
1328{
1329 struct drm_i915_private *dev_priv = dev->dev_private;
1330 unsigned int gtt_size;
1331 u16 snb_gmch_ctl;
1332 int ret;
1333
1334 /* TODO: We're not aware of mappable constraints on gen8 yet */
1335 *mappable_base = pci_resource_start(dev->pdev, 2);
1336 *mappable_end = pci_resource_len(dev->pdev, 2);
1337
1338 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
1339 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
1340
1341 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1342
1343 *stolen = gen8_get_stolen_size(snb_gmch_ctl);
1344
1345 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
1346 *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
1347
1348 gen8_setup_private_ppat(dev_priv);
1349
1350 ret = ggtt_probe_common(dev, gtt_size);
1351
1352 dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
1353 dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
1354
1355 return ret;
1356}
1357
876static int gen6_gmch_probe(struct drm_device *dev, 1358static int gen6_gmch_probe(struct drm_device *dev,
877 size_t *gtt_total, 1359 size_t *gtt_total,
878 size_t *stolen, 1360 size_t *stolen,
@@ -880,7 +1362,6 @@ static int gen6_gmch_probe(struct drm_device *dev,
880 unsigned long *mappable_end) 1362 unsigned long *mappable_end)
881{ 1363{
882 struct drm_i915_private *dev_priv = dev->dev_private; 1364 struct drm_i915_private *dev_priv = dev->dev_private;
883 phys_addr_t gtt_bus_addr;
884 unsigned int gtt_size; 1365 unsigned int gtt_size;
885 u16 snb_gmch_ctl; 1366 u16 snb_gmch_ctl;
886 int ret; 1367 int ret;
@@ -900,24 +1381,13 @@ static int gen6_gmch_probe(struct drm_device *dev,
900 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) 1381 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
901 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); 1382 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
902 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 1383 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
903 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
904 1384
905 *stolen = gen6_get_stolen_size(snb_gmch_ctl); 1385 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
906 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
907
908 /* For Modern GENs the PTEs and register space are split in the BAR */
909 gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
910 (pci_resource_len(dev->pdev, 0) / 2);
911 1386
912 dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); 1387 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
913 if (!dev_priv->gtt.gsm) { 1388 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
914 DRM_ERROR("Failed to map the gtt page table\n");
915 return -ENOMEM;
916 }
917 1389
918 ret = setup_scratch_page(dev); 1390 ret = ggtt_probe_common(dev, gtt_size);
919 if (ret)
920 DRM_ERROR("Scratch setup failed\n");
921 1391
922 dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range; 1392 dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
923 dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries; 1393 dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
@@ -971,7 +1441,7 @@ int i915_gem_gtt_init(struct drm_device *dev)
971 if (INTEL_INFO(dev)->gen <= 5) { 1441 if (INTEL_INFO(dev)->gen <= 5) {
972 gtt->gtt_probe = i915_gmch_probe; 1442 gtt->gtt_probe = i915_gmch_probe;
973 gtt->base.cleanup = i915_gmch_remove; 1443 gtt->base.cleanup = i915_gmch_remove;
974 } else { 1444 } else if (INTEL_INFO(dev)->gen < 8) {
975 gtt->gtt_probe = gen6_gmch_probe; 1445 gtt->gtt_probe = gen6_gmch_probe;
976 gtt->base.cleanup = gen6_gmch_remove; 1446 gtt->base.cleanup = gen6_gmch_remove;
977 if (IS_HASWELL(dev) && dev_priv->ellc_size) 1447 if (IS_HASWELL(dev) && dev_priv->ellc_size)
@@ -984,6 +1454,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
984 gtt->base.pte_encode = ivb_pte_encode; 1454 gtt->base.pte_encode = ivb_pte_encode;
985 else 1455 else
986 gtt->base.pte_encode = snb_pte_encode; 1456 gtt->base.pte_encode = snb_pte_encode;
1457 } else {
1458 dev_priv->gtt.gtt_probe = gen8_gmch_probe;
1459 dev_priv->gtt.base.cleanup = gen6_gmch_remove;
987 } 1460 }
988 1461
989 ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size, 1462 ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a8bb213da79f..79dcb8f896c6 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -624,6 +624,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
624 624
625 /* Fences */ 625 /* Fences */
626 switch (INTEL_INFO(dev)->gen) { 626 switch (INTEL_INFO(dev)->gen) {
627 case 8:
627 case 7: 628 case 7:
628 case 6: 629 case 6:
629 for (i = 0; i < dev_priv->num_fence_regs; i++) 630 for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -1044,6 +1045,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
1044 default: 1045 default:
1045 WARN_ONCE(1, "Unsupported platform\n"); 1046 WARN_ONCE(1, "Unsupported platform\n");
1046 case 7: 1047 case 7:
1048 case 8:
1047 instdone[0] = I915_READ(GEN7_INSTDONE_1); 1049 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1048 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 1050 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1049 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 1051 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 19949e8b36c5..931ee5d8cdb1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -270,6 +270,21 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
270 } 270 }
271} 271}
272 272
273static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
274 enum pipe pipe, bool enable)
275{
276 struct drm_i915_private *dev_priv = dev->dev_private;
277
278 assert_spin_locked(&dev_priv->irq_lock);
279
280 if (enable)
281 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
282 else
283 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
284 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
285 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
286}
287
273/** 288/**
274 * ibx_display_interrupt_update - update SDEIMR 289 * ibx_display_interrupt_update - update SDEIMR
275 * @dev_priv: driver private 290 * @dev_priv: driver private
@@ -382,6 +397,8 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
382 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 397 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
383 else if (IS_GEN7(dev)) 398 else if (IS_GEN7(dev))
384 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable); 399 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
400 else if (IS_GEN8(dev))
401 broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
385 402
386done: 403done:
387 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 404 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -600,35 +617,40 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
600 return I915_READ(reg); 617 return I915_READ(reg);
601} 618}
602 619
603static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe) 620/* raw reads, only for fast reads of display block, no need for forcewake etc. */
621#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
622#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
623
624static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
604{ 625{
605 struct drm_i915_private *dev_priv = dev->dev_private; 626 struct drm_i915_private *dev_priv = dev->dev_private;
606 uint32_t status; 627 uint32_t status;
628 int reg;
607 629
608 if (IS_VALLEYVIEW(dev)) { 630 if (IS_VALLEYVIEW(dev)) {
609 status = pipe == PIPE_A ? 631 status = pipe == PIPE_A ?
610 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : 632 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
611 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 633 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
612 634
613 return I915_READ(VLV_ISR) & status; 635 reg = VLV_ISR;
614 } else if (IS_GEN2(dev)) { 636 } else if (IS_GEN2(dev)) {
615 status = pipe == PIPE_A ? 637 status = pipe == PIPE_A ?
616 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : 638 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
617 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 639 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
618 640
619 return I915_READ16(ISR) & status; 641 reg = ISR;
620 } else if (INTEL_INFO(dev)->gen < 5) { 642 } else if (INTEL_INFO(dev)->gen < 5) {
621 status = pipe == PIPE_A ? 643 status = pipe == PIPE_A ?
622 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : 644 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
623 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 645 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
624 646
625 return I915_READ(ISR) & status; 647 reg = ISR;
626 } else if (INTEL_INFO(dev)->gen < 7) { 648 } else if (INTEL_INFO(dev)->gen < 7) {
627 status = pipe == PIPE_A ? 649 status = pipe == PIPE_A ?
628 DE_PIPEA_VBLANK : 650 DE_PIPEA_VBLANK :
629 DE_PIPEB_VBLANK; 651 DE_PIPEB_VBLANK;
630 652
631 return I915_READ(DEISR) & status; 653 reg = DEISR;
632 } else { 654 } else {
633 switch (pipe) { 655 switch (pipe) {
634 default: 656 default:
@@ -643,12 +665,17 @@ static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe)
643 break; 665 break;
644 } 666 }
645 667
646 return I915_READ(DEISR) & status; 668 reg = DEISR;
647 } 669 }
670
671 if (IS_GEN2(dev))
672 return __raw_i915_read16(dev_priv, reg) & status;
673 else
674 return __raw_i915_read32(dev_priv, reg) & status;
648} 675}
649 676
650static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 677static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
651 int *vpos, int *hpos) 678 int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
652{ 679{
653 struct drm_i915_private *dev_priv = dev->dev_private; 680 struct drm_i915_private *dev_priv = dev->dev_private;
654 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 681 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
@@ -658,6 +685,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
658 int vbl_start, vbl_end, htotal, vtotal; 685 int vbl_start, vbl_end, htotal, vtotal;
659 bool in_vbl = true; 686 bool in_vbl = true;
660 int ret = 0; 687 int ret = 0;
688 unsigned long irqflags;
661 689
662 if (!intel_crtc->active) { 690 if (!intel_crtc->active) {
663 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 691 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@@ -672,14 +700,27 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
672 700
673 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 701 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
674 702
703 /*
704 * Lock uncore.lock, as we will do multiple timing critical raw
705 * register reads, potentially with preemption disabled, so the
706 * following code must not block on uncore.lock.
707 */
708 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
709
710 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
711
712 /* Get optional system timestamp before query. */
713 if (stime)
714 *stime = ktime_get();
715
675 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 716 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
676 /* No obvious pixelcount register. Only query vertical 717 /* No obvious pixelcount register. Only query vertical
677 * scanout position from Display scan line register. 718 * scanout position from Display scan line register.
678 */ 719 */
679 if (IS_GEN2(dev)) 720 if (IS_GEN2(dev))
680 position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 721 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
681 else 722 else
682 position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 723 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
683 724
684 /* 725 /*
685 * The scanline counter increments at the leading edge 726 * The scanline counter increments at the leading edge
@@ -688,7 +729,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
688 * to get a more accurate picture whether we're in vblank 729 * to get a more accurate picture whether we're in vblank
689 * or not. 730 * or not.
690 */ 731 */
691 in_vbl = intel_pipe_in_vblank(dev, pipe); 732 in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
692 if ((in_vbl && position == vbl_start - 1) || 733 if ((in_vbl && position == vbl_start - 1) ||
693 (!in_vbl && position == vbl_end - 1)) 734 (!in_vbl && position == vbl_end - 1))
694 position = (position + 1) % vtotal; 735 position = (position + 1) % vtotal;
@@ -697,7 +738,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
697 * We can split this into vertical and horizontal 738 * We can split this into vertical and horizontal
698 * scanout position. 739 * scanout position.
699 */ 740 */
700 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 741 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
701 742
702 /* convert to pixel counts */ 743 /* convert to pixel counts */
703 vbl_start *= htotal; 744 vbl_start *= htotal;
@@ -705,6 +746,14 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
705 vtotal *= htotal; 746 vtotal *= htotal;
706 } 747 }
707 748
749 /* Get optional system timestamp after query. */
750 if (etime)
751 *etime = ktime_get();
752
753 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
754
755 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
756
708 in_vbl = position >= vbl_start && position < vbl_end; 757 in_vbl = position >= vbl_start && position < vbl_end;
709 758
710 /* 759 /*
@@ -1038,7 +1087,7 @@ static void ivybridge_parity_work(struct work_struct *work)
1038 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1087 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1039 parity_event[5] = NULL; 1088 parity_event[5] = NULL;
1040 1089
1041 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, 1090 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1042 KOBJ_CHANGE, parity_event); 1091 KOBJ_CHANGE, parity_event);
1043 1092
1044 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1093 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
@@ -1117,6 +1166,56 @@ static void snb_gt_irq_handler(struct drm_device *dev,
1117 ivybridge_parity_error_irq_handler(dev, gt_iir); 1166 ivybridge_parity_error_irq_handler(dev, gt_iir);
1118} 1167}
1119 1168
1169static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1170 struct drm_i915_private *dev_priv,
1171 u32 master_ctl)
1172{
1173 u32 rcs, bcs, vcs;
1174 uint32_t tmp = 0;
1175 irqreturn_t ret = IRQ_NONE;
1176
1177 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1178 tmp = I915_READ(GEN8_GT_IIR(0));
1179 if (tmp) {
1180 ret = IRQ_HANDLED;
1181 rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1182 bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1183 if (rcs & GT_RENDER_USER_INTERRUPT)
1184 notify_ring(dev, &dev_priv->ring[RCS]);
1185 if (bcs & GT_RENDER_USER_INTERRUPT)
1186 notify_ring(dev, &dev_priv->ring[BCS]);
1187 I915_WRITE(GEN8_GT_IIR(0), tmp);
1188 } else
1189 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1190 }
1191
1192 if (master_ctl & GEN8_GT_VCS1_IRQ) {
1193 tmp = I915_READ(GEN8_GT_IIR(1));
1194 if (tmp) {
1195 ret = IRQ_HANDLED;
1196 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1197 if (vcs & GT_RENDER_USER_INTERRUPT)
1198 notify_ring(dev, &dev_priv->ring[VCS]);
1199 I915_WRITE(GEN8_GT_IIR(1), tmp);
1200 } else
1201 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1202 }
1203
1204 if (master_ctl & GEN8_GT_VECS_IRQ) {
1205 tmp = I915_READ(GEN8_GT_IIR(3));
1206 if (tmp) {
1207 ret = IRQ_HANDLED;
1208 vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1209 if (vcs & GT_RENDER_USER_INTERRUPT)
1210 notify_ring(dev, &dev_priv->ring[VECS]);
1211 I915_WRITE(GEN8_GT_IIR(3), tmp);
1212 } else
1213 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1214 }
1215
1216 return ret;
1217}
1218
1120#define HPD_STORM_DETECT_PERIOD 1000 1219#define HPD_STORM_DETECT_PERIOD 1000
1121#define HPD_STORM_THRESHOLD 5 1220#define HPD_STORM_THRESHOLD 5
1122 1221
@@ -1351,7 +1450,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1351 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1450 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1352 1451
1353 for_each_pipe(pipe) { 1452 for_each_pipe(pipe) {
1354 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1453 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1355 drm_handle_vblank(dev, pipe); 1454 drm_handle_vblank(dev, pipe);
1356 1455
1357 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 1456 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
@@ -1690,6 +1789,117 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1690 return ret; 1789 return ret;
1691} 1790}
1692 1791
1792static irqreturn_t gen8_irq_handler(int irq, void *arg)
1793{
1794 struct drm_device *dev = arg;
1795 struct drm_i915_private *dev_priv = dev->dev_private;
1796 u32 master_ctl;
1797 irqreturn_t ret = IRQ_NONE;
1798 uint32_t tmp = 0;
1799 enum pipe pipe;
1800
1801 atomic_inc(&dev_priv->irq_received);
1802
1803 master_ctl = I915_READ(GEN8_MASTER_IRQ);
1804 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
1805 if (!master_ctl)
1806 return IRQ_NONE;
1807
1808 I915_WRITE(GEN8_MASTER_IRQ, 0);
1809 POSTING_READ(GEN8_MASTER_IRQ);
1810
1811 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
1812
1813 if (master_ctl & GEN8_DE_MISC_IRQ) {
1814 tmp = I915_READ(GEN8_DE_MISC_IIR);
1815 if (tmp & GEN8_DE_MISC_GSE)
1816 intel_opregion_asle_intr(dev);
1817 else if (tmp)
1818 DRM_ERROR("Unexpected DE Misc interrupt\n");
1819 else
1820 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
1821
1822 if (tmp) {
1823 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
1824 ret = IRQ_HANDLED;
1825 }
1826 }
1827
1828 if (master_ctl & GEN8_DE_PORT_IRQ) {
1829 tmp = I915_READ(GEN8_DE_PORT_IIR);
1830 if (tmp & GEN8_AUX_CHANNEL_A)
1831 dp_aux_irq_handler(dev);
1832 else if (tmp)
1833 DRM_ERROR("Unexpected DE Port interrupt\n");
1834 else
1835 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
1836
1837 if (tmp) {
1838 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
1839 ret = IRQ_HANDLED;
1840 }
1841 }
1842
1843 for_each_pipe(pipe) {
1844 uint32_t pipe_iir;
1845
1846 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
1847 continue;
1848
1849 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
1850 if (pipe_iir & GEN8_PIPE_VBLANK)
1851 drm_handle_vblank(dev, pipe);
1852
1853 if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
1854 intel_prepare_page_flip(dev, pipe);
1855 intel_finish_page_flip_plane(dev, pipe);
1856 }
1857
1858 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
1859 hsw_pipe_crc_irq_handler(dev, pipe);
1860
1861 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
1862 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1863 false))
1864 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1865 pipe_name(pipe));
1866 }
1867
1868 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
1869 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
1870 pipe_name(pipe),
1871 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
1872 }
1873
1874 if (pipe_iir) {
1875 ret = IRQ_HANDLED;
1876 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
1877 } else
1878 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
1879 }
1880
1881 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
1882 /*
1883 * FIXME(BDW): Assume for now that the new interrupt handling
1884 * scheme also closed the SDE interrupt handling race we've seen
1885 * on older pch-split platforms. But this needs testing.
1886 */
1887 u32 pch_iir = I915_READ(SDEIIR);
1888
1889 cpt_irq_handler(dev, pch_iir);
1890
1891 if (pch_iir) {
1892 I915_WRITE(SDEIIR, pch_iir);
1893 ret = IRQ_HANDLED;
1894 }
1895 }
1896
1897 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1898 POSTING_READ(GEN8_MASTER_IRQ);
1899
1900 return ret;
1901}
1902
1693static void i915_error_wake_up(struct drm_i915_private *dev_priv, 1903static void i915_error_wake_up(struct drm_i915_private *dev_priv,
1694 bool reset_completed) 1904 bool reset_completed)
1695{ 1905{
@@ -1737,7 +1947,7 @@ static void i915_error_work_func(struct work_struct *work)
1737 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 1947 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1738 int ret; 1948 int ret;
1739 1949
1740 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 1950 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
1741 1951
1742 /* 1952 /*
1743 * Note that there's only one work item which does gpu resets, so we 1953 * Note that there's only one work item which does gpu resets, so we
@@ -1751,7 +1961,7 @@ static void i915_error_work_func(struct work_struct *work)
1751 */ 1961 */
1752 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 1962 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
1753 DRM_DEBUG_DRIVER("resetting chip\n"); 1963 DRM_DEBUG_DRIVER("resetting chip\n");
1754 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 1964 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
1755 reset_event); 1965 reset_event);
1756 1966
1757 /* 1967 /*
@@ -1778,7 +1988,7 @@ static void i915_error_work_func(struct work_struct *work)
1778 smp_mb__before_atomic_inc(); 1988 smp_mb__before_atomic_inc();
1779 atomic_inc(&dev_priv->gpu_error.reset_counter); 1989 atomic_inc(&dev_priv->gpu_error.reset_counter);
1780 1990
1781 kobject_uevent_env(&dev->primary->kdev.kobj, 1991 kobject_uevent_env(&dev->primary->kdev->kobj,
1782 KOBJ_CHANGE, reset_done_event); 1992 KOBJ_CHANGE, reset_done_event);
1783 } else { 1993 } else {
1784 atomic_set_mask(I915_WEDGED, &error->reset_counter); 1994 atomic_set_mask(I915_WEDGED, &error->reset_counter);
@@ -2043,6 +2253,22 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2043 return 0; 2253 return 0;
2044} 2254}
2045 2255
2256static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2257{
2258 struct drm_i915_private *dev_priv = dev->dev_private;
2259 unsigned long irqflags;
2260
2261 if (!i915_pipe_enabled(dev, pipe))
2262 return -EINVAL;
2263
2264 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2265 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2266 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2267 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2268 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2269 return 0;
2270}
2271
2046/* Called from drm generic code, passed 'crtc' which 2272/* Called from drm generic code, passed 'crtc' which
2047 * we use as a pipe index 2273 * we use as a pipe index
2048 */ 2274 */
@@ -2091,6 +2317,21 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2091 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2317 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2092} 2318}
2093 2319
2320static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2321{
2322 struct drm_i915_private *dev_priv = dev->dev_private;
2323 unsigned long irqflags;
2324
2325 if (!i915_pipe_enabled(dev, pipe))
2326 return;
2327
2328 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2329 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2330 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2331 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2332 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2333}
2334
2094static u32 2335static u32
2095ring_last_seqno(struct intel_ring_buffer *ring) 2336ring_last_seqno(struct intel_ring_buffer *ring)
2096{ 2337{
@@ -2425,6 +2666,53 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
2425 POSTING_READ(VLV_IER); 2666 POSTING_READ(VLV_IER);
2426} 2667}
2427 2668
2669static void gen8_irq_preinstall(struct drm_device *dev)
2670{
2671 struct drm_i915_private *dev_priv = dev->dev_private;
2672 int pipe;
2673
2674 atomic_set(&dev_priv->irq_received, 0);
2675
2676 I915_WRITE(GEN8_MASTER_IRQ, 0);
2677 POSTING_READ(GEN8_MASTER_IRQ);
2678
2679 /* IIR can theoretically queue up two events. Be paranoid */
2680#define GEN8_IRQ_INIT_NDX(type, which) do { \
2681 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
2682 POSTING_READ(GEN8_##type##_IMR(which)); \
2683 I915_WRITE(GEN8_##type##_IER(which), 0); \
2684 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2685 POSTING_READ(GEN8_##type##_IIR(which)); \
2686 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2687 } while (0)
2688
2689#define GEN8_IRQ_INIT(type) do { \
2690 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
2691 POSTING_READ(GEN8_##type##_IMR); \
2692 I915_WRITE(GEN8_##type##_IER, 0); \
2693 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2694 POSTING_READ(GEN8_##type##_IIR); \
2695 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2696 } while (0)
2697
2698 GEN8_IRQ_INIT_NDX(GT, 0);
2699 GEN8_IRQ_INIT_NDX(GT, 1);
2700 GEN8_IRQ_INIT_NDX(GT, 2);
2701 GEN8_IRQ_INIT_NDX(GT, 3);
2702
2703 for_each_pipe(pipe) {
2704 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
2705 }
2706
2707 GEN8_IRQ_INIT(DE_PORT);
2708 GEN8_IRQ_INIT(DE_MISC);
2709 GEN8_IRQ_INIT(PCU);
2710#undef GEN8_IRQ_INIT
2711#undef GEN8_IRQ_INIT_NDX
2712
2713 POSTING_READ(GEN8_PCU_IIR);
2714}
2715
2428static void ibx_hpd_irq_setup(struct drm_device *dev) 2716static void ibx_hpd_irq_setup(struct drm_device *dev)
2429{ 2717{
2430 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2718 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2630,6 +2918,117 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2630 return 0; 2918 return 0;
2631} 2919}
2632 2920
2921static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
2922{
2923 int i;
2924
2925 /* These are interrupts we'll toggle with the ring mask register */
2926 uint32_t gt_interrupts[] = {
2927 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
2928 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
2929 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
2930 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
2931 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
2932 0,
2933 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
2934 };
2935
2936 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
2937 u32 tmp = I915_READ(GEN8_GT_IIR(i));
2938 if (tmp)
2939 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
2940 i, tmp);
2941 I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
2942 I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
2943 }
2944 POSTING_READ(GEN8_GT_IER(0));
2945}
2946
2947static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
2948{
2949 struct drm_device *dev = dev_priv->dev;
2950 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
2951 GEN8_PIPE_CDCLK_CRC_DONE |
2952 GEN8_PIPE_FIFO_UNDERRUN |
2953 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2954 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
2955 int pipe;
2956 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
2957 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
2958 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
2959
2960 for_each_pipe(pipe) {
2961 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2962 if (tmp)
2963 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
2964 pipe, tmp);
2965 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2966 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
2967 }
2968 POSTING_READ(GEN8_DE_PIPE_ISR(0));
2969
2970 I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
2971 I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
2972 POSTING_READ(GEN8_DE_PORT_IER);
2973}
2974
2975static int gen8_irq_postinstall(struct drm_device *dev)
2976{
2977 struct drm_i915_private *dev_priv = dev->dev_private;
2978
2979 gen8_gt_irq_postinstall(dev_priv);
2980 gen8_de_irq_postinstall(dev_priv);
2981
2982 ibx_irq_postinstall(dev);
2983
2984 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
2985 POSTING_READ(GEN8_MASTER_IRQ);
2986
2987 return 0;
2988}
2989
2990static void gen8_irq_uninstall(struct drm_device *dev)
2991{
2992 struct drm_i915_private *dev_priv = dev->dev_private;
2993 int pipe;
2994
2995 if (!dev_priv)
2996 return;
2997
2998 atomic_set(&dev_priv->irq_received, 0);
2999
3000 I915_WRITE(GEN8_MASTER_IRQ, 0);
3001
3002#define GEN8_IRQ_FINI_NDX(type, which) do { \
3003 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3004 I915_WRITE(GEN8_##type##_IER(which), 0); \
3005 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3006 } while (0)
3007
3008#define GEN8_IRQ_FINI(type) do { \
3009 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3010 I915_WRITE(GEN8_##type##_IER, 0); \
3011 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3012 } while (0)
3013
3014 GEN8_IRQ_FINI_NDX(GT, 0);
3015 GEN8_IRQ_FINI_NDX(GT, 1);
3016 GEN8_IRQ_FINI_NDX(GT, 2);
3017 GEN8_IRQ_FINI_NDX(GT, 3);
3018
3019 for_each_pipe(pipe) {
3020 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
3021 }
3022
3023 GEN8_IRQ_FINI(DE_PORT);
3024 GEN8_IRQ_FINI(DE_MISC);
3025 GEN8_IRQ_FINI(PCU);
3026#undef GEN8_IRQ_FINI
3027#undef GEN8_IRQ_FINI_NDX
3028
3029 POSTING_READ(GEN8_PCU_IIR);
3030}
3031
2633static void valleyview_irq_uninstall(struct drm_device *dev) 3032static void valleyview_irq_uninstall(struct drm_device *dev)
2634{ 3033{
2635 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3034 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -3409,6 +3808,14 @@ void intel_irq_init(struct drm_device *dev)
3409 dev->driver->enable_vblank = valleyview_enable_vblank; 3808 dev->driver->enable_vblank = valleyview_enable_vblank;
3410 dev->driver->disable_vblank = valleyview_disable_vblank; 3809 dev->driver->disable_vblank = valleyview_disable_vblank;
3411 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3810 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3811 } else if (IS_GEN8(dev)) {
3812 dev->driver->irq_handler = gen8_irq_handler;
3813 dev->driver->irq_preinstall = gen8_irq_preinstall;
3814 dev->driver->irq_postinstall = gen8_irq_postinstall;
3815 dev->driver->irq_uninstall = gen8_irq_uninstall;
3816 dev->driver->enable_vblank = gen8_enable_vblank;
3817 dev->driver->disable_vblank = gen8_disable_vblank;
3818 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3412 } else if (HAS_PCH_SPLIT(dev)) { 3819 } else if (HAS_PCH_SPLIT(dev)) {
3413 dev->driver->irq_handler = ironlake_irq_handler; 3820 dev->driver->irq_handler = ironlake_irq_handler;
3414 dev->driver->irq_preinstall = ironlake_irq_preinstall; 3821 dev->driver->irq_preinstall = ironlake_irq_preinstall;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a8a5bcb521c7..849e595ed19d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -110,6 +110,9 @@
110#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) 110#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
111#define PP_DIR_DCLV_2G 0xffffffff 111#define PP_DIR_DCLV_2G 0xffffffff
112 112
113#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4))
114#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8)
115
113#define GAM_ECOCHK 0x4090 116#define GAM_ECOCHK 0x4090
114#define ECOCHK_SNB_BIT (1<<10) 117#define ECOCHK_SNB_BIT (1<<10)
115#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) 118#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
@@ -247,6 +250,7 @@
247#define MI_BATCH_NON_SECURE_HSW (1<<13) 250#define MI_BATCH_NON_SECURE_HSW (1<<13)
248#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) 251#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
249#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ 252#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
253#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
250#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ 254#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
251#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) 255#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
252#define MI_SEMAPHORE_UPDATE (1<<21) 256#define MI_SEMAPHORE_UPDATE (1<<21)
@@ -657,6 +661,9 @@
657#define ARB_MODE 0x04030 661#define ARB_MODE 0x04030
658#define ARB_MODE_SWIZZLE_SNB (1<<4) 662#define ARB_MODE_SWIZZLE_SNB (1<<4)
659#define ARB_MODE_SWIZZLE_IVB (1<<5) 663#define ARB_MODE_SWIZZLE_IVB (1<<5)
664#define GAMTARBMODE 0x04a08
665#define ARB_MODE_BWGTLB_DISABLE (1<<9)
666#define ARB_MODE_SWIZZLE_BDW (1<<1)
660#define RENDER_HWS_PGA_GEN7 (0x04080) 667#define RENDER_HWS_PGA_GEN7 (0x04080)
661#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) 668#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
662#define RING_FAULT_GTTSEL_MASK (1<<11) 669#define RING_FAULT_GTTSEL_MASK (1<<11)
@@ -664,6 +671,7 @@
664#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3) 671#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
665#define RING_FAULT_VALID (1<<0) 672#define RING_FAULT_VALID (1<<0)
666#define DONE_REG 0x40b0 673#define DONE_REG 0x40b0
674#define GEN8_PRIVATE_PAT 0x40e0
667#define BSD_HWS_PGA_GEN7 (0x04180) 675#define BSD_HWS_PGA_GEN7 (0x04180)
668#define BLT_HWS_PGA_GEN7 (0x04280) 676#define BLT_HWS_PGA_GEN7 (0x04280)
669#define VEBOX_HWS_PGA_GEN7 (0x04380) 677#define VEBOX_HWS_PGA_GEN7 (0x04380)
@@ -743,6 +751,7 @@
743#define FPGA_DBG_RM_NOCLAIM (1<<31) 751#define FPGA_DBG_RM_NOCLAIM (1<<31)
744 752
745#define DERRMR 0x44050 753#define DERRMR 0x44050
754/* Note that HBLANK events are reserved on bdw+ */
746#define DERRMR_PIPEA_SCANLINE (1<<0) 755#define DERRMR_PIPEA_SCANLINE (1<<0)
747#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1) 756#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
748#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2) 757#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
@@ -776,6 +785,7 @@
776#define _3D_CHICKEN3 0x02090 785#define _3D_CHICKEN3 0x02090
777#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) 786#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
778#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) 787#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
788#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1)
779 789
780#define MI_MODE 0x0209c 790#define MI_MODE 0x0209c
781# define VS_TIMER_DISPATCH (1 << 6) 791# define VS_TIMER_DISPATCH (1 << 6)
@@ -1822,6 +1832,9 @@
1822 * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages. 1832 * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
1823 */ 1833 */
1824#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) 1834#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
1835/* Same as Haswell, but 72064 bytes now. */
1836#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
1837
1825 1838
1826#define VLV_CLK_CTL2 0x101104 1839#define VLV_CLK_CTL2 0x101104
1827#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 1840#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
@@ -1952,8 +1965,8 @@
1952#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) 1965#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
1953#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) 1966#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
1954 1967
1955/* HSW eDP PSR registers */ 1968/* HSW+ eDP PSR registers */
1956#define EDP_PSR_BASE(dev) 0x64800 1969#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
1957#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0) 1970#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
1958#define EDP_PSR_ENABLE (1<<31) 1971#define EDP_PSR_ENABLE (1<<31)
1959#define EDP_PSR_LINK_DISABLE (0<<27) 1972#define EDP_PSR_LINK_DISABLE (0<<27)
@@ -2397,6 +2410,21 @@
2397 2410
2398#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238) 2411#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
2399 2412
2413#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250)
2414#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350)
2415#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
2416 _VLV_BLC_PWM_CTL2_B)
2417
2418#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254)
2419#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354)
2420#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
2421 _VLV_BLC_PWM_CTL_B)
2422
2423#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260)
2424#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360)
2425#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
2426 _VLV_BLC_HIST_CTL_B)
2427
2400/* Backlight control */ 2428/* Backlight control */
2401#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */ 2429#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
2402#define BLM_PWM_ENABLE (1 << 31) 2430#define BLM_PWM_ENABLE (1 << 31)
@@ -3228,6 +3256,18 @@
3228#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) 3256#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
3229#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) 3257#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
3230 3258
3259#define _PIPE_MISC_A 0x70030
3260#define _PIPE_MISC_B 0x71030
3261#define PIPEMISC_DITHER_BPC_MASK (7<<5)
3262#define PIPEMISC_DITHER_8_BPC (0<<5)
3263#define PIPEMISC_DITHER_10_BPC (1<<5)
3264#define PIPEMISC_DITHER_6_BPC (2<<5)
3265#define PIPEMISC_DITHER_12_BPC (3<<5)
3266#define PIPEMISC_DITHER_ENABLE (1<<4)
3267#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
3268#define PIPEMISC_DITHER_TYPE_SP (0<<2)
3269#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
3270
3231#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) 3271#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
3232#define PIPEB_LINE_COMPARE_INT_EN (1<<29) 3272#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
3233#define PIPEB_HLINE_INT_EN (1<<28) 3273#define PIPEB_HLINE_INT_EN (1<<28)
@@ -3358,6 +3398,7 @@
3358#define WM1_LP_LATENCY_MASK (0x7f<<24) 3398#define WM1_LP_LATENCY_MASK (0x7f<<24)
3359#define WM1_LP_FBC_MASK (0xf<<20) 3399#define WM1_LP_FBC_MASK (0xf<<20)
3360#define WM1_LP_FBC_SHIFT 20 3400#define WM1_LP_FBC_SHIFT 20
3401#define WM1_LP_FBC_SHIFT_BDW 19
3361#define WM1_LP_SR_MASK (0x7ff<<8) 3402#define WM1_LP_SR_MASK (0x7ff<<8)
3362#define WM1_LP_SR_SHIFT 8 3403#define WM1_LP_SR_SHIFT 8
3363#define WM1_LP_CURSOR_MASK (0xff) 3404#define WM1_LP_CURSOR_MASK (0xff)
@@ -3998,6 +4039,71 @@
3998#define GTIIR 0x44018 4039#define GTIIR 0x44018
3999#define GTIER 0x4401c 4040#define GTIER 0x4401c
4000 4041
4042#define GEN8_MASTER_IRQ 0x44200
4043#define GEN8_MASTER_IRQ_CONTROL (1<<31)
4044#define GEN8_PCU_IRQ (1<<30)
4045#define GEN8_DE_PCH_IRQ (1<<23)
4046#define GEN8_DE_MISC_IRQ (1<<22)
4047#define GEN8_DE_PORT_IRQ (1<<20)
4048#define GEN8_DE_PIPE_C_IRQ (1<<18)
4049#define GEN8_DE_PIPE_B_IRQ (1<<17)
4050#define GEN8_DE_PIPE_A_IRQ (1<<16)
4051#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe))
4052#define GEN8_GT_VECS_IRQ (1<<6)
4053#define GEN8_GT_VCS2_IRQ (1<<3)
4054#define GEN8_GT_VCS1_IRQ (1<<2)
4055#define GEN8_GT_BCS_IRQ (1<<1)
4056#define GEN8_GT_RCS_IRQ (1<<0)
4057
4058#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which)))
4059#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which)))
4060#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
4061#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
4062
4063#define GEN8_BCS_IRQ_SHIFT 16
4064#define GEN8_RCS_IRQ_SHIFT 0
4065#define GEN8_VCS2_IRQ_SHIFT 16
4066#define GEN8_VCS1_IRQ_SHIFT 0
4067#define GEN8_VECS_IRQ_SHIFT 0
4068
4069#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
4070#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
4071#define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe)))
4072#define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe)))
4073#define GEN8_PIPE_FIFO_UNDERRUN (1 << 31)
4074#define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29)
4075#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
4076#define GEN8_PIPE_CURSOR_FAULT (1 << 10)
4077#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
4078#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
4079#define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5)
4080#define GEN8_PIPE_FLIP_DONE (1 << 4)
4081#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
4082#define GEN8_PIPE_VSYNC (1 << 1)
4083#define GEN8_PIPE_VBLANK (1 << 0)
4084#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
4085 (GEN8_PIPE_CURSOR_FAULT | \
4086 GEN8_PIPE_SPRITE_FAULT | \
4087 GEN8_PIPE_PRIMARY_FAULT)
4088
4089#define GEN8_DE_PORT_ISR 0x44440
4090#define GEN8_DE_PORT_IMR 0x44444
4091#define GEN8_DE_PORT_IIR 0x44448
4092#define GEN8_DE_PORT_IER 0x4444c
4093#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
4094#define GEN8_AUX_CHANNEL_A (1 << 0)
4095
4096#define GEN8_DE_MISC_ISR 0x44460
4097#define GEN8_DE_MISC_IMR 0x44464
4098#define GEN8_DE_MISC_IIR 0x44468
4099#define GEN8_DE_MISC_IER 0x4446c
4100#define GEN8_DE_MISC_GSE (1 << 27)
4101
4102#define GEN8_PCU_ISR 0x444e0
4103#define GEN8_PCU_IMR 0x444e4
4104#define GEN8_PCU_IIR 0x444e8
4105#define GEN8_PCU_IER 0x444ec
4106
4001#define ILK_DISPLAY_CHICKEN2 0x42004 4107#define ILK_DISPLAY_CHICKEN2 0x42004
4002/* Required on all Ironlake and Sandybridge according to the B-Spec. */ 4108/* Required on all Ironlake and Sandybridge according to the B-Spec. */
4003#define ILK_ELPIN_409_SELECT (1 << 25) 4109#define ILK_ELPIN_409_SELECT (1 << 25)
@@ -4023,8 +4129,14 @@
4023# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) 4129# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
4024 4130
4025#define CHICKEN_PAR1_1 0x42080 4131#define CHICKEN_PAR1_1 0x42080
4132#define DPA_MASK_VBLANK_SRD (1 << 15)
4026#define FORCE_ARB_IDLE_PLANES (1 << 14) 4133#define FORCE_ARB_IDLE_PLANES (1 << 14)
4027 4134
4135#define _CHICKEN_PIPESL_1_A 0x420b0
4136#define _CHICKEN_PIPESL_1_B 0x420b4
4137#define DPRS_MASK_VBLANK_SRD (1 << 0)
4138#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
4139
4028#define DISP_ARB_CTL 0x45000 4140#define DISP_ARB_CTL 0x45000
4029#define DISP_TILE_SURFACE_SWIZZLING (1<<13) 4141#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
4030#define DISP_FBC_WM_DIS (1<<15) 4142#define DISP_FBC_WM_DIS (1<<15)
@@ -4035,6 +4147,8 @@
4035/* GEN7 chicken */ 4147/* GEN7 chicken */
4036#define GEN7_COMMON_SLICE_CHICKEN1 0x7010 4148#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
4037# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) 4149# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
4150#define COMMON_SLICE_CHICKEN2 0x7014
4151# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
4038 4152
4039#define GEN7_L3CNTLREG1 0xB01C 4153#define GEN7_L3CNTLREG1 0xB01C
4040#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C 4154#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
@@ -4863,6 +4977,7 @@
4863#define GEN6_PCODE_WRITE_D_COMP 0x11 4977#define GEN6_PCODE_WRITE_D_COMP 0x11
4864#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) 4978#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
4865#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) 4979#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
4980#define DISPLAY_IPS_CONTROL 0x19
4866#define GEN6_PCODE_DATA 0x138128 4981#define GEN6_PCODE_DATA 0x138128
4867#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 4982#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
4868#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 4983#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -4900,6 +5015,7 @@
4900#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ 5015#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
4901#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100 5016#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
4902#define GEN7_MAX_PS_THREAD_DEP (8<<12) 5017#define GEN7_MAX_PS_THREAD_DEP (8<<12)
5018#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
4903#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) 5019#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
4904 5020
4905#define GEN7_ROW_CHICKEN2 0xe4f4 5021#define GEN7_ROW_CHICKEN2 0xe4f4
@@ -4909,6 +5025,10 @@
4909#define HSW_ROW_CHICKEN3 0xe49c 5025#define HSW_ROW_CHICKEN3 0xe49c
4910#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) 5026#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
4911 5027
5028#define HALF_SLICE_CHICKEN3 0xe184
5029#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
5030#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
5031
4912#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) 5032#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
4913#define INTEL_AUDIO_DEVCL 0x808629FB 5033#define INTEL_AUDIO_DEVCL 0x808629FB
4914#define INTEL_AUDIO_DEVBLC 0x80862801 5034#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -4950,6 +5070,18 @@
4950 CPT_AUD_CNTL_ST_B) 5070 CPT_AUD_CNTL_ST_B)
4951#define CPT_AUD_CNTRL_ST2 0xE50C0 5071#define CPT_AUD_CNTRL_ST2 0xE50C0
4952 5072
5073#define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
5074#define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
5075#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
5076 VLV_HDMIW_HDMIEDID_A, \
5077 VLV_HDMIW_HDMIEDID_B)
5078#define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
5079#define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
5080#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
5081 VLV_AUD_CNTL_ST_A, \
5082 VLV_AUD_CNTL_ST_B)
5083#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0)
5084
4953/* These are the 4 32-bit write offset registers for each stream 5085/* These are the 4 32-bit write offset registers for each stream
4954 * output buffer. It determines the offset from the 5086 * output buffer. It determines the offset from the
4955 * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to. 5087 * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
@@ -4966,6 +5098,12 @@
4966#define CPT_AUD_CFG(pipe) _PIPE(pipe, \ 5098#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
4967 CPT_AUD_CONFIG_A, \ 5099 CPT_AUD_CONFIG_A, \
4968 CPT_AUD_CONFIG_B) 5100 CPT_AUD_CONFIG_B)
5101#define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
5102#define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
5103#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
5104 VLV_AUD_CONFIG_A, \
5105 VLV_AUD_CONFIG_B)
5106
4969#define AUD_CONFIG_N_VALUE_INDEX (1 << 29) 5107#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
4970#define AUD_CONFIG_N_PROG_ENABLE (1 << 28) 5108#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
4971#define AUD_CONFIG_UPPER_N_SHIFT 20 5109#define AUD_CONFIG_UPPER_N_SHIFT 20
@@ -5108,6 +5246,7 @@
5108#define DDI_BUF_CTL_B 0x64100 5246#define DDI_BUF_CTL_B 0x64100
5109#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) 5247#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
5110#define DDI_BUF_CTL_ENABLE (1<<31) 5248#define DDI_BUF_CTL_ENABLE (1<<31)
5249/* Haswell */
5111#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ 5250#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
5112#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ 5251#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
5113#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ 5252#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
@@ -5117,6 +5256,16 @@
5117#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ 5256#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
5118#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ 5257#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
5119#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ 5258#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
5259/* Broadwell */
5260#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */
5261#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */
5262#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */
5263#define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */
5264#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */
5265#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */
5266#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */
5267#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */
5268#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */
5120#define DDI_BUF_EMP_MASK (0xf<<24) 5269#define DDI_BUF_EMP_MASK (0xf<<24)
5121#define DDI_BUF_PORT_REVERSAL (1<<16) 5270#define DDI_BUF_PORT_REVERSAL (1<<16)
5122#define DDI_BUF_IS_IDLE (1<<7) 5271#define DDI_BUF_IS_IDLE (1<<7)
@@ -5226,6 +5375,9 @@
5226#define LCPLL_PLL_LOCK (1<<30) 5375#define LCPLL_PLL_LOCK (1<<30)
5227#define LCPLL_CLK_FREQ_MASK (3<<26) 5376#define LCPLL_CLK_FREQ_MASK (3<<26)
5228#define LCPLL_CLK_FREQ_450 (0<<26) 5377#define LCPLL_CLK_FREQ_450 (0<<26)
5378#define LCPLL_CLK_FREQ_54O_BDW (1<<26)
5379#define LCPLL_CLK_FREQ_337_5_BDW (2<<26)
5380#define LCPLL_CLK_FREQ_675_BDW (3<<26)
5229#define LCPLL_CD_CLOCK_DISABLE (1<<25) 5381#define LCPLL_CD_CLOCK_DISABLE (1<<25)
5230#define LCPLL_CD2X_CLOCK_DISABLE (1<<23) 5382#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
5231#define LCPLL_POWER_DOWN_ALLOW (1<<22) 5383#define LCPLL_POWER_DOWN_ALLOW (1<<22)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a088f1f46bdb..6b8fef7fb3bb 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -192,7 +192,6 @@ static void i915_restore_vga(struct drm_device *dev)
192static void i915_save_display(struct drm_device *dev) 192static void i915_save_display(struct drm_device *dev)
193{ 193{
194 struct drm_i915_private *dev_priv = dev->dev_private; 194 struct drm_i915_private *dev_priv = dev->dev_private;
195 unsigned long flags;
196 195
197 /* Display arbitration control */ 196 /* Display arbitration control */
198 if (INTEL_INFO(dev)->gen <= 4) 197 if (INTEL_INFO(dev)->gen <= 4)
@@ -203,30 +202,27 @@ static void i915_save_display(struct drm_device *dev)
203 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 202 if (!drm_core_check_feature(dev, DRIVER_MODESET))
204 i915_save_display_reg(dev); 203 i915_save_display_reg(dev);
205 204
206 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
207
208 /* LVDS state */ 205 /* LVDS state */
209 if (HAS_PCH_SPLIT(dev)) { 206 if (HAS_PCH_SPLIT(dev)) {
210 dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL); 207 dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
211 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
212 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
213 dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
214 dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
215 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 208 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
216 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); 209 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
210 } else if (IS_VALLEYVIEW(dev)) {
211 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
212 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
213
214 dev_priv->regfile.saveBLC_HIST_CTL =
215 I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
216 dev_priv->regfile.saveBLC_HIST_CTL_B =
217 I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
217 } else { 218 } else {
218 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); 219 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
219 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); 220 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
220 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
221 dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); 221 dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
222 if (INTEL_INFO(dev)->gen >= 4)
223 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
224 if (IS_MOBILE(dev) && !IS_I830(dev)) 222 if (IS_MOBILE(dev) && !IS_I830(dev))
225 dev_priv->regfile.saveLVDS = I915_READ(LVDS); 223 dev_priv->regfile.saveLVDS = I915_READ(LVDS);
226 } 224 }
227 225
228 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
229
230 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) 226 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
231 dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL); 227 dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
232 228
@@ -262,7 +258,6 @@ static void i915_restore_display(struct drm_device *dev)
262{ 258{
263 struct drm_i915_private *dev_priv = dev->dev_private; 259 struct drm_i915_private *dev_priv = dev->dev_private;
264 u32 mask = 0xffffffff; 260 u32 mask = 0xffffffff;
265 unsigned long flags;
266 261
267 /* Display arbitration */ 262 /* Display arbitration */
268 if (INTEL_INFO(dev)->gen <= 4) 263 if (INTEL_INFO(dev)->gen <= 4)
@@ -271,12 +266,6 @@ static void i915_restore_display(struct drm_device *dev)
271 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 266 if (!drm_core_check_feature(dev, DRIVER_MODESET))
272 i915_restore_display_reg(dev); 267 i915_restore_display_reg(dev);
273 268
274 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
275
276 /* LVDS state */
277 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
278 I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
279
280 if (drm_core_check_feature(dev, DRIVER_MODESET)) 269 if (drm_core_check_feature(dev, DRIVER_MODESET))
281 mask = ~LVDS_PORT_EN; 270 mask = ~LVDS_PORT_EN;
282 271
@@ -289,22 +278,19 @@ static void i915_restore_display(struct drm_device *dev)
289 I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL); 278 I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
290 279
291 if (HAS_PCH_SPLIT(dev)) { 280 if (HAS_PCH_SPLIT(dev)) {
292 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
293 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
294 /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
295 * otherwise we get blank eDP screen after S3 on some machines
296 */
297 I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
298 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
299 I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); 281 I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
300 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); 282 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
301 I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); 283 I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
302 I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); 284 I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
303 I915_WRITE(RSTDBYCTL, 285 I915_WRITE(RSTDBYCTL,
304 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY); 286 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
287 } else if (IS_VALLEYVIEW(dev)) {
288 I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
289 dev_priv->regfile.saveBLC_HIST_CTL);
290 I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
291 dev_priv->regfile.saveBLC_HIST_CTL);
305 } else { 292 } else {
306 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS); 293 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
307 I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
308 I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL); 294 I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
309 I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); 295 I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
310 I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); 296 I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
@@ -312,8 +298,6 @@ static void i915_restore_display(struct drm_device *dev)
312 I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); 298 I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
313 } 299 }
314 300
315 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
316
317 /* only restore FBC info on the platform that supports FBC*/ 301 /* only restore FBC info on the platform that supports FBC*/
318 intel_disable_fbc(dev); 302 intel_disable_fbc(dev);
319 if (I915_HAS_FBC(dev)) { 303 if (I915_HAS_FBC(dev)) {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index fdce8824723c..05d8b1680c22 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,6 +32,8 @@
32#include "intel_drv.h" 32#include "intel_drv.h"
33#include "i915_drv.h" 33#include "i915_drv.h"
34 34
35#define dev_to_drm_minor(d) dev_get_drvdata((d))
36
35#ifdef CONFIG_PM 37#ifdef CONFIG_PM
36static u32 calc_residency(struct drm_device *dev, const u32 reg) 38static u32 calc_residency(struct drm_device *dev, const u32 reg)
37{ 39{
@@ -66,14 +68,14 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
66static ssize_t 68static ssize_t
67show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) 69show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
68{ 70{
69 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 71 struct drm_minor *dminor = dev_to_drm_minor(kdev);
70 return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev)); 72 return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
71} 73}
72 74
73static ssize_t 75static ssize_t
74show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf) 76show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
75{ 77{
76 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 78 struct drm_minor *dminor = dev_get_drvdata(kdev);
77 u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); 79 u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
78 return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency); 80 return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
79} 81}
@@ -81,7 +83,7 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
81static ssize_t 83static ssize_t
82show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf) 84show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
83{ 85{
84 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 86 struct drm_minor *dminor = dev_to_drm_minor(kdev);
85 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); 87 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
86 if (IS_VALLEYVIEW(dminor->dev)) 88 if (IS_VALLEYVIEW(dminor->dev))
87 rc6p_residency = 0; 89 rc6p_residency = 0;
@@ -91,7 +93,7 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
91static ssize_t 93static ssize_t
92show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf) 94show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
93{ 95{
94 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 96 struct drm_minor *dminor = dev_to_drm_minor(kdev);
95 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); 97 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
96 if (IS_VALLEYVIEW(dminor->dev)) 98 if (IS_VALLEYVIEW(dminor->dev))
97 rc6pp_residency = 0; 99 rc6pp_residency = 0;
@@ -137,7 +139,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
137 loff_t offset, size_t count) 139 loff_t offset, size_t count)
138{ 140{
139 struct device *dev = container_of(kobj, struct device, kobj); 141 struct device *dev = container_of(kobj, struct device, kobj);
140 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 142 struct drm_minor *dminor = dev_to_drm_minor(dev);
141 struct drm_device *drm_dev = dminor->dev; 143 struct drm_device *drm_dev = dminor->dev;
142 struct drm_i915_private *dev_priv = drm_dev->dev_private; 144 struct drm_i915_private *dev_priv = drm_dev->dev_private;
143 int slice = (int)(uintptr_t)attr->private; 145 int slice = (int)(uintptr_t)attr->private;
@@ -173,7 +175,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
173 loff_t offset, size_t count) 175 loff_t offset, size_t count)
174{ 176{
175 struct device *dev = container_of(kobj, struct device, kobj); 177 struct device *dev = container_of(kobj, struct device, kobj);
176 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 178 struct drm_minor *dminor = dev_to_drm_minor(dev);
177 struct drm_device *drm_dev = dminor->dev; 179 struct drm_device *drm_dev = dminor->dev;
178 struct drm_i915_private *dev_priv = drm_dev->dev_private; 180 struct drm_i915_private *dev_priv = drm_dev->dev_private;
179 struct i915_hw_context *ctx; 181 struct i915_hw_context *ctx;
@@ -246,7 +248,7 @@ static struct bin_attribute dpf_attrs_1 = {
246static ssize_t gt_cur_freq_mhz_show(struct device *kdev, 248static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
247 struct device_attribute *attr, char *buf) 249 struct device_attribute *attr, char *buf)
248{ 250{
249 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 251 struct drm_minor *minor = dev_to_drm_minor(kdev);
250 struct drm_device *dev = minor->dev; 252 struct drm_device *dev = minor->dev;
251 struct drm_i915_private *dev_priv = dev->dev_private; 253 struct drm_i915_private *dev_priv = dev->dev_private;
252 int ret; 254 int ret;
@@ -269,7 +271,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
269static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, 271static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
270 struct device_attribute *attr, char *buf) 272 struct device_attribute *attr, char *buf)
271{ 273{
272 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 274 struct drm_minor *minor = dev_to_drm_minor(kdev);
273 struct drm_device *dev = minor->dev; 275 struct drm_device *dev = minor->dev;
274 struct drm_i915_private *dev_priv = dev->dev_private; 276 struct drm_i915_private *dev_priv = dev->dev_private;
275 277
@@ -279,7 +281,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
279 281
280static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) 282static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
281{ 283{
282 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 284 struct drm_minor *minor = dev_to_drm_minor(kdev);
283 struct drm_device *dev = minor->dev; 285 struct drm_device *dev = minor->dev;
284 struct drm_i915_private *dev_priv = dev->dev_private; 286 struct drm_i915_private *dev_priv = dev->dev_private;
285 int ret; 287 int ret;
@@ -300,7 +302,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
300 struct device_attribute *attr, 302 struct device_attribute *attr,
301 const char *buf, size_t count) 303 const char *buf, size_t count)
302{ 304{
303 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 305 struct drm_minor *minor = dev_to_drm_minor(kdev);
304 struct drm_device *dev = minor->dev; 306 struct drm_device *dev = minor->dev;
305 struct drm_i915_private *dev_priv = dev->dev_private; 307 struct drm_i915_private *dev_priv = dev->dev_private;
306 u32 val, rp_state_cap, hw_max, hw_min, non_oc_max; 308 u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
@@ -355,7 +357,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
355 357
356static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) 358static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
357{ 359{
358 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 360 struct drm_minor *minor = dev_to_drm_minor(kdev);
359 struct drm_device *dev = minor->dev; 361 struct drm_device *dev = minor->dev;
360 struct drm_i915_private *dev_priv = dev->dev_private; 362 struct drm_i915_private *dev_priv = dev->dev_private;
361 int ret; 363 int ret;
@@ -376,7 +378,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
376 struct device_attribute *attr, 378 struct device_attribute *attr,
377 const char *buf, size_t count) 379 const char *buf, size_t count)
378{ 380{
379 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 381 struct drm_minor *minor = dev_to_drm_minor(kdev);
380 struct drm_device *dev = minor->dev; 382 struct drm_device *dev = minor->dev;
381 struct drm_i915_private *dev_priv = dev->dev_private; 383 struct drm_i915_private *dev_priv = dev->dev_private;
382 u32 val, rp_state_cap, hw_max, hw_min; 384 u32 val, rp_state_cap, hw_max, hw_min;
@@ -437,7 +439,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
437/* For now we have a static number of RP states */ 439/* For now we have a static number of RP states */
438static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) 440static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
439{ 441{
440 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 442 struct drm_minor *minor = dev_to_drm_minor(kdev);
441 struct drm_device *dev = minor->dev; 443 struct drm_device *dev = minor->dev;
442 struct drm_i915_private *dev_priv = dev->dev_private; 444 struct drm_i915_private *dev_priv = dev->dev_private;
443 u32 val, rp_state_cap; 445 u32 val, rp_state_cap;
@@ -485,7 +487,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
485{ 487{
486 488
487 struct device *kdev = container_of(kobj, struct device, kobj); 489 struct device *kdev = container_of(kobj, struct device, kobj);
488 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 490 struct drm_minor *minor = dev_to_drm_minor(kdev);
489 struct drm_device *dev = minor->dev; 491 struct drm_device *dev = minor->dev;
490 struct i915_error_state_file_priv error_priv; 492 struct i915_error_state_file_priv error_priv;
491 struct drm_i915_error_state_buf error_str; 493 struct drm_i915_error_state_buf error_str;
@@ -520,7 +522,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
520 loff_t off, size_t count) 522 loff_t off, size_t count)
521{ 523{
522 struct device *kdev = container_of(kobj, struct device, kobj); 524 struct device *kdev = container_of(kobj, struct device, kobj);
523 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 525 struct drm_minor *minor = dev_to_drm_minor(kdev);
524 struct drm_device *dev = minor->dev; 526 struct drm_device *dev = minor->dev;
525 int ret; 527 int ret;
526 528
@@ -550,19 +552,19 @@ void i915_setup_sysfs(struct drm_device *dev)
550 552
551#ifdef CONFIG_PM 553#ifdef CONFIG_PM
552 if (INTEL_INFO(dev)->gen >= 6) { 554 if (INTEL_INFO(dev)->gen >= 6) {
553 ret = sysfs_merge_group(&dev->primary->kdev.kobj, 555 ret = sysfs_merge_group(&dev->primary->kdev->kobj,
554 &rc6_attr_group); 556 &rc6_attr_group);
555 if (ret) 557 if (ret)
556 DRM_ERROR("RC6 residency sysfs setup failed\n"); 558 DRM_ERROR("RC6 residency sysfs setup failed\n");
557 } 559 }
558#endif 560#endif
559 if (HAS_L3_DPF(dev)) { 561 if (HAS_L3_DPF(dev)) {
560 ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); 562 ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
561 if (ret) 563 if (ret)
562 DRM_ERROR("l3 parity sysfs setup failed\n"); 564 DRM_ERROR("l3 parity sysfs setup failed\n");
563 565
564 if (NUM_L3_SLICES(dev) > 1) { 566 if (NUM_L3_SLICES(dev) > 1) {
565 ret = device_create_bin_file(&dev->primary->kdev, 567 ret = device_create_bin_file(dev->primary->kdev,
566 &dpf_attrs_1); 568 &dpf_attrs_1);
567 if (ret) 569 if (ret)
568 DRM_ERROR("l3 parity slice 1 setup failed\n"); 570 DRM_ERROR("l3 parity slice 1 setup failed\n");
@@ -571,13 +573,13 @@ void i915_setup_sysfs(struct drm_device *dev)
571 573
572 ret = 0; 574 ret = 0;
573 if (IS_VALLEYVIEW(dev)) 575 if (IS_VALLEYVIEW(dev))
574 ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs); 576 ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
575 else if (INTEL_INFO(dev)->gen >= 6) 577 else if (INTEL_INFO(dev)->gen >= 6)
576 ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs); 578 ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
577 if (ret) 579 if (ret)
578 DRM_ERROR("RPS sysfs setup failed\n"); 580 DRM_ERROR("RPS sysfs setup failed\n");
579 581
580 ret = sysfs_create_bin_file(&dev->primary->kdev.kobj, 582 ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
581 &error_state_attr); 583 &error_state_attr);
582 if (ret) 584 if (ret)
583 DRM_ERROR("error_state sysfs setup failed\n"); 585 DRM_ERROR("error_state sysfs setup failed\n");
@@ -585,14 +587,14 @@ void i915_setup_sysfs(struct drm_device *dev)
585 587
586void i915_teardown_sysfs(struct drm_device *dev) 588void i915_teardown_sysfs(struct drm_device *dev)
587{ 589{
588 sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr); 590 sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
589 if (IS_VALLEYVIEW(dev)) 591 if (IS_VALLEYVIEW(dev))
590 sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs); 592 sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
591 else 593 else
592 sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); 594 sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
593 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs_1); 595 device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
594 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); 596 device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
595#ifdef CONFIG_PM 597#ifdef CONFIG_PM
596 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); 598 sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
597#endif 599#endif
598} 600}
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index 967da4772c44..caa18e855815 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -270,6 +270,18 @@ void i915_save_display_reg(struct drm_device *dev)
270 } 270 }
271 /* FIXME: regfile.save TV & SDVO state */ 271 /* FIXME: regfile.save TV & SDVO state */
272 272
273 /* Backlight */
274 if (HAS_PCH_SPLIT(dev)) {
275 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
276 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
277 dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
278 dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
279 } else {
280 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
281 if (INTEL_INFO(dev)->gen >= 4)
282 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
283 }
284
273 return; 285 return;
274} 286}
275 287
@@ -280,6 +292,21 @@ void i915_restore_display_reg(struct drm_device *dev)
280 int dpll_b_reg, fpb0_reg, fpb1_reg; 292 int dpll_b_reg, fpb0_reg, fpb1_reg;
281 int i; 293 int i;
282 294
295 /* Backlight */
296 if (HAS_PCH_SPLIT(dev)) {
297 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
298 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
299 /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
300 * otherwise we get blank eDP screen after S3 on some machines
301 */
302 I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
303 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
304 } else {
305 if (INTEL_INFO(dev)->gen >= 4)
306 I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
307 I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
308 }
309
283 /* Display port ratios (must be done before clock is set) */ 310 /* Display port ratios (must be done before clock is set) */
284 if (SUPPORTS_INTEGRATED_DP(dev)) { 311 if (SUPPORTS_INTEGRATED_DP(dev)) {
285 I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M); 312 I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index e29bcae1ef81..6dd622d733b9 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -624,11 +624,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
624 624
625 aux_channel = child->raw[25]; 625 aux_channel = child->raw[25];
626 626
627 is_dvi = child->common.device_type & (1 << 4); 627 is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
628 is_dp = child->common.device_type & (1 << 2); 628 is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
629 is_crt = child->common.device_type & (1 << 0); 629 is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
630 is_hdmi = is_dvi && (child->common.device_type & (1 << 11)) == 0; 630 is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
631 is_edp = is_dp && (child->common.device_type & (1 << 12)); 631 is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
632 632
633 info->supports_dvi = is_dvi; 633 info->supports_dvi = is_dvi;
634 info->supports_hdmi = is_hdmi; 634 info->supports_hdmi = is_hdmi;
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 287cc5a21c2e..f580a2b0ddd3 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -638,6 +638,40 @@ int intel_parse_bios(struct drm_device *dev);
638#define DEVICE_TYPE_DP 0x68C6 638#define DEVICE_TYPE_DP 0x68C6
639#define DEVICE_TYPE_eDP 0x78C6 639#define DEVICE_TYPE_eDP 0x78C6
640 640
641#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
642#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
643#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
644#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
645#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
646#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
647#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
648#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
649#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
650#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
651#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
652#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
653#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
654#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
655#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
656
657/*
658 * Bits we care about when checking for DEVICE_TYPE_eDP
659 * Depending on the system, the other bits may or may not
660 * be set for eDP outputs.
661 */
662#define DEVICE_TYPE_eDP_BITS \
663 (DEVICE_TYPE_INTERNAL_CONNECTOR | \
664 DEVICE_TYPE_NOT_HDMI_OUTPUT | \
665 DEVICE_TYPE_MIPI_OUTPUT | \
666 DEVICE_TYPE_COMPOSITE_OUTPUT | \
667 DEVICE_TYPE_DUAL_CHANNEL | \
668 DEVICE_TYPE_LVDS_SINGALING | \
669 DEVICE_TYPE_TMDS_DVI_SIGNALING | \
670 DEVICE_TYPE_VIDEO_SIGNALING | \
671 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
672 DEVICE_TYPE_DIGITAL_OUTPUT | \
673 DEVICE_TYPE_ANALOG_OUTPUT)
674
641/* define the DVO port for HDMI output type */ 675/* define the DVO port for HDMI output type */
642#define DVO_B 1 676#define DVO_B 1
643#define DVO_C 2 677#define DVO_C 2
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 2e01bd3a5d8c..b5b1b9b23adf 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -822,16 +822,15 @@ void intel_crt_init(struct drm_device *dev)
822 crt->base.mode_set = intel_crt_mode_set; 822 crt->base.mode_set = intel_crt_mode_set;
823 crt->base.disable = intel_disable_crt; 823 crt->base.disable = intel_disable_crt;
824 crt->base.enable = intel_enable_crt; 824 crt->base.enable = intel_enable_crt;
825 if (IS_HASWELL(dev))
826 crt->base.get_config = hsw_crt_get_config;
827 else
828 crt->base.get_config = intel_crt_get_config;
829 if (I915_HAS_HOTPLUG(dev)) 825 if (I915_HAS_HOTPLUG(dev))
830 crt->base.hpd_pin = HPD_CRT; 826 crt->base.hpd_pin = HPD_CRT;
831 if (HAS_DDI(dev)) 827 if (HAS_DDI(dev)) {
828 crt->base.get_config = hsw_crt_get_config;
832 crt->base.get_hw_state = intel_ddi_get_hw_state; 829 crt->base.get_hw_state = intel_ddi_get_hw_state;
833 else 830 } else {
831 crt->base.get_config = intel_crt_get_config;
834 crt->base.get_hw_state = intel_crt_get_hw_state; 832 crt->base.get_hw_state = intel_crt_get_hw_state;
833 }
835 intel_connector->get_hw_state = intel_connector_get_hw_state; 834 intel_connector->get_hw_state = intel_connector_get_hw_state;
836 835
837 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 836 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 31f4fe271388..1591576a6101 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -72,6 +72,45 @@ static const u32 hsw_ddi_translations_hdmi[] = {
72 0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */ 72 0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */
73}; 73};
74 74
75static const u32 bdw_ddi_translations_edp[] = {
76 0x00FFFFFF, 0x00000012, /* DP parameters */
77 0x00EBAFFF, 0x00020011,
78 0x00C71FFF, 0x0006000F,
79 0x00FFFFFF, 0x00020011,
80 0x00DB6FFF, 0x0005000F,
81 0x00BEEFFF, 0x000A000C,
82 0x00FFFFFF, 0x0005000F,
83 0x00DB6FFF, 0x000A000C,
84 0x00FFFFFF, 0x000A000C,
85 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
86};
87
88static const u32 bdw_ddi_translations_dp[] = {
89 0x00FFFFFF, 0x0007000E, /* DP parameters */
90 0x00D75FFF, 0x000E000A,
91 0x00BEFFFF, 0x00140006,
92 0x00FFFFFF, 0x000E000A,
93 0x00D75FFF, 0x00180004,
94 0x80CB2FFF, 0x001B0002,
95 0x00F7DFFF, 0x00180004,
96 0x80D75FFF, 0x001B0002,
97 0x80FFFFFF, 0x001B0002,
98 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
99};
100
101static const u32 bdw_ddi_translations_fdi[] = {
102 0x00FFFFFF, 0x0001000E, /* FDI parameters */
103 0x00D75FFF, 0x0004000A,
104 0x00C30FFF, 0x00070006,
105 0x00AAAFFF, 0x000C0000,
106 0x00FFFFFF, 0x0004000A,
107 0x00D75FFF, 0x00090004,
108 0x00C30FFF, 0x000C0000,
109 0x00FFFFFF, 0x00070006,
110 0x00D75FFF, 0x000C0000,
111 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
112};
113
75enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) 114enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
76{ 115{
77 struct drm_encoder *encoder = &intel_encoder->base; 116 struct drm_encoder *encoder = &intel_encoder->base;
@@ -92,8 +131,9 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
92 } 131 }
93} 132}
94 133
95/* On Haswell, DDI port buffers must be programmed with correct values 134/*
96 * in advance. The buffer values are different for FDI and DP modes, 135 * Starting with Haswell, DDI port buffers must be programmed with correct
136 * values in advance. The buffer values are different for FDI and DP modes,
97 * but the HDMI/DVI fields are shared among those. So we program the DDI 137 * but the HDMI/DVI fields are shared among those. So we program the DDI
98 * in either FDI or DP modes only, as HDMI connections will work with both 138 * in either FDI or DP modes only, as HDMI connections will work with both
99 * of those 139 * of those
@@ -103,10 +143,47 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
103 struct drm_i915_private *dev_priv = dev->dev_private; 143 struct drm_i915_private *dev_priv = dev->dev_private;
104 u32 reg; 144 u32 reg;
105 int i; 145 int i;
106 const u32 *ddi_translations = (port == PORT_E) ?
107 hsw_ddi_translations_fdi :
108 hsw_ddi_translations_dp;
109 int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; 146 int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
147 const u32 *ddi_translations_fdi;
148 const u32 *ddi_translations_dp;
149 const u32 *ddi_translations_edp;
150 const u32 *ddi_translations;
151
152 if (IS_BROADWELL(dev)) {
153 ddi_translations_fdi = bdw_ddi_translations_fdi;
154 ddi_translations_dp = bdw_ddi_translations_dp;
155 ddi_translations_edp = bdw_ddi_translations_edp;
156 } else if (IS_HASWELL(dev)) {
157 ddi_translations_fdi = hsw_ddi_translations_fdi;
158 ddi_translations_dp = hsw_ddi_translations_dp;
159 ddi_translations_edp = hsw_ddi_translations_dp;
160 } else {
161 WARN(1, "ddi translation table missing\n");
162 ddi_translations_edp = bdw_ddi_translations_dp;
163 ddi_translations_fdi = bdw_ddi_translations_fdi;
164 ddi_translations_dp = bdw_ddi_translations_dp;
165 }
166
167 switch (port) {
168 case PORT_A:
169 ddi_translations = ddi_translations_edp;
170 break;
171 case PORT_B:
172 case PORT_C:
173 ddi_translations = ddi_translations_dp;
174 break;
175 case PORT_D:
176 if (intel_dpd_is_edp(dev))
177 ddi_translations = ddi_translations_edp;
178 else
179 ddi_translations = ddi_translations_dp;
180 break;
181 case PORT_E:
182 ddi_translations = ddi_translations_fdi;
183 break;
184 default:
185 BUG();
186 }
110 187
111 for (i = 0, reg = DDI_BUF_TRANS(port); 188 for (i = 0, reg = DDI_BUF_TRANS(port);
112 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { 189 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
@@ -756,7 +833,8 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
756 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 833 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
757 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); 834 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
758 struct drm_encoder *encoder = &intel_encoder->base; 835 struct drm_encoder *encoder = &intel_encoder->base;
759 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 836 struct drm_device *dev = crtc->dev;
837 struct drm_i915_private *dev_priv = dev->dev_private;
760 enum pipe pipe = intel_crtc->pipe; 838 enum pipe pipe = intel_crtc->pipe;
761 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 839 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
762 enum port port = intel_ddi_get_encoder_port(intel_encoder); 840 enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -792,10 +870,11 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
792 if (cpu_transcoder == TRANSCODER_EDP) { 870 if (cpu_transcoder == TRANSCODER_EDP) {
793 switch (pipe) { 871 switch (pipe) {
794 case PIPE_A: 872 case PIPE_A:
795 /* Can only use the always-on power well for eDP when 873 /* On Haswell, can only use the always-on power well for
796 * not using the panel fitter, and when not using motion 874 * eDP when not using the panel fitter, and when not
797 * blur mitigation (which we don't support). */ 875 * using motion blur mitigation (which we don't
798 if (intel_crtc->config.pch_pfit.enabled) 876 * support). */
877 if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
799 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; 878 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
800 else 879 else
801 temp |= TRANS_DDI_EDP_INPUT_A_ON; 880 temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1156,18 +1235,29 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1156 1235
1157int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) 1236int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
1158{ 1237{
1238 struct drm_device *dev = dev_priv->dev;
1159 uint32_t lcpll = I915_READ(LCPLL_CTL); 1239 uint32_t lcpll = I915_READ(LCPLL_CTL);
1240 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
1160 1241
1161 if (lcpll & LCPLL_CD_SOURCE_FCLK) 1242 if (lcpll & LCPLL_CD_SOURCE_FCLK) {
1162 return 800000; 1243 return 800000;
1163 else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) 1244 } else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) {
1164 return 450000; 1245 return 450000;
1165 else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450) 1246 } else if (freq == LCPLL_CLK_FREQ_450) {
1166 return 450000; 1247 return 450000;
1167 else if (IS_ULT(dev_priv->dev)) 1248 } else if (IS_HASWELL(dev)) {
1168 return 337500; 1249 if (IS_ULT(dev))
1169 else 1250 return 337500;
1170 return 540000; 1251 else
1252 return 540000;
1253 } else {
1254 if (freq == LCPLL_CLK_FREQ_54O_BDW)
1255 return 540000;
1256 else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
1257 return 337500;
1258 else
1259 return 675000;
1260 }
1171} 1261}
1172 1262
1173void intel_ddi_pll_init(struct drm_device *dev) 1263void intel_ddi_pll_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 752d83019f36..3b7f1c4eb48e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2164,7 +2164,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2164 else 2164 else
2165 dspcntr &= ~DISPPLANE_TILED; 2165 dspcntr &= ~DISPPLANE_TILED;
2166 2166
2167 if (IS_HASWELL(dev)) 2167 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2168 dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE; 2168 dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
2169 else 2169 else
2170 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2170 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -2184,7 +2184,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2184 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2184 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2185 I915_MODIFY_DISPBASE(DSPSURF(plane), 2185 I915_MODIFY_DISPBASE(DSPSURF(plane),
2186 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); 2186 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2187 if (IS_HASWELL(dev)) { 2187 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2188 I915_WRITE(DSPOFFSET(plane), (y << 16) | x); 2188 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2189 } else { 2189 } else {
2190 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2190 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -3401,15 +3401,26 @@ void hsw_enable_ips(struct intel_crtc *crtc)
3401 * only after intel_enable_plane. And intel_enable_plane already waits 3401 * only after intel_enable_plane. And intel_enable_plane already waits
3402 * for a vblank, so all we need to do here is to enable the IPS bit. */ 3402 * for a vblank, so all we need to do here is to enable the IPS bit. */
3403 assert_plane_enabled(dev_priv, crtc->plane); 3403 assert_plane_enabled(dev_priv, crtc->plane);
3404 I915_WRITE(IPS_CTL, IPS_ENABLE); 3404 if (IS_BROADWELL(crtc->base.dev)) {
3405 3405 mutex_lock(&dev_priv->rps.hw_lock);
3406 /* The bit only becomes 1 in the next vblank, so this wait here is 3406 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
3407 * essentially intel_wait_for_vblank. If we don't have this and don't 3407 mutex_unlock(&dev_priv->rps.hw_lock);
3408 * wait for vblanks until the end of crtc_enable, then the HW state 3408 /* Quoting Art Runyan: "its not safe to expect any particular
3409 * readout code will complain that the expected IPS_CTL value is not the 3409 * value in IPS_CTL bit 31 after enabling IPS through the
3410 * one we read. */ 3410 * mailbox." Therefore we need to defer waiting on the state
3411 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50)) 3411 * change.
3412 DRM_ERROR("Timed out waiting for IPS enable\n"); 3412 * TODO: need to fix this for state checker
3413 */
3414 } else {
3415 I915_WRITE(IPS_CTL, IPS_ENABLE);
3416 /* The bit only becomes 1 in the next vblank, so this wait here
3417 * is essentially intel_wait_for_vblank. If we don't have this
3418 * and don't wait for vblanks until the end of crtc_enable, then
3419 * the HW state readout code will complain that the expected
3420 * IPS_CTL value is not the one we read. */
3421 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
3422 DRM_ERROR("Timed out waiting for IPS enable\n");
3423 }
3413} 3424}
3414 3425
3415void hsw_disable_ips(struct intel_crtc *crtc) 3426void hsw_disable_ips(struct intel_crtc *crtc)
@@ -3421,7 +3432,12 @@ void hsw_disable_ips(struct intel_crtc *crtc)
3421 return; 3432 return;
3422 3433
3423 assert_plane_enabled(dev_priv, crtc->plane); 3434 assert_plane_enabled(dev_priv, crtc->plane);
3424 I915_WRITE(IPS_CTL, 0); 3435 if (IS_BROADWELL(crtc->base.dev)) {
3436 mutex_lock(&dev_priv->rps.hw_lock);
3437 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3438 mutex_unlock(&dev_priv->rps.hw_lock);
3439 } else
3440 I915_WRITE(IPS_CTL, 0);
3425 POSTING_READ(IPS_CTL); 3441 POSTING_READ(IPS_CTL);
3426 3442
3427 /* We need to wait for a vblank before we can disable the plane. */ 3443 /* We need to wait for a vblank before we can disable the plane. */
@@ -4420,7 +4436,7 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
4420 return false; 4436 return false;
4421 } 4437 }
4422 4438
4423 if (IS_HASWELL(dev)) { 4439 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4424 if (pipe_config->fdi_lanes > 2) { 4440 if (pipe_config->fdi_lanes > 2) {
4425 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", 4441 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
4426 pipe_config->fdi_lanes); 4442 pipe_config->fdi_lanes);
@@ -5994,14 +6010,16 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
5994 6010
5995static void haswell_set_pipeconf(struct drm_crtc *crtc) 6011static void haswell_set_pipeconf(struct drm_crtc *crtc)
5996{ 6012{
5997 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 6013 struct drm_device *dev = crtc->dev;
6014 struct drm_i915_private *dev_priv = dev->dev_private;
5998 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6015 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6016 enum pipe pipe = intel_crtc->pipe;
5999 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 6017 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
6000 uint32_t val; 6018 uint32_t val;
6001 6019
6002 val = 0; 6020 val = 0;
6003 6021
6004 if (intel_crtc->config.dither) 6022 if (IS_HASWELL(dev) && intel_crtc->config.dither)
6005 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 6023 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6006 6024
6007 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 6025 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
@@ -6014,6 +6032,33 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
6014 6032
6015 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); 6033 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
6016 POSTING_READ(GAMMA_MODE(intel_crtc->pipe)); 6034 POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
6035
6036 if (IS_BROADWELL(dev)) {
6037 val = 0;
6038
6039 switch (intel_crtc->config.pipe_bpp) {
6040 case 18:
6041 val |= PIPEMISC_DITHER_6_BPC;
6042 break;
6043 case 24:
6044 val |= PIPEMISC_DITHER_8_BPC;
6045 break;
6046 case 30:
6047 val |= PIPEMISC_DITHER_10_BPC;
6048 break;
6049 case 36:
6050 val |= PIPEMISC_DITHER_12_BPC;
6051 break;
6052 default:
6053 /* Case prevented by pipe_config_set_bpp. */
6054 BUG();
6055 }
6056
6057 if (intel_crtc->config.dither)
6058 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
6059
6060 I915_WRITE(PIPEMISC(pipe), val);
6061 }
6017} 6062}
6018 6063
6019static bool ironlake_compute_clocks(struct drm_crtc *crtc, 6064static bool ironlake_compute_clocks(struct drm_crtc *crtc,
@@ -7165,6 +7210,11 @@ static void ironlake_write_eld(struct drm_connector *connector,
7165 aud_config = IBX_AUD_CFG(pipe); 7210 aud_config = IBX_AUD_CFG(pipe);
7166 aud_cntl_st = IBX_AUD_CNTL_ST(pipe); 7211 aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
7167 aud_cntrl_st2 = IBX_AUD_CNTL_ST2; 7212 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
7213 } else if (IS_VALLEYVIEW(connector->dev)) {
7214 hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
7215 aud_config = VLV_AUD_CFG(pipe);
7216 aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
7217 aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
7168 } else { 7218 } else {
7169 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe); 7219 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
7170 aud_config = CPT_AUD_CFG(pipe); 7220 aud_config = CPT_AUD_CFG(pipe);
@@ -7174,8 +7224,19 @@ static void ironlake_write_eld(struct drm_connector *connector,
7174 7224
7175 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); 7225 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
7176 7226
7177 i = I915_READ(aud_cntl_st); 7227 if (IS_VALLEYVIEW(connector->dev)) {
7178 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ 7228 struct intel_encoder *intel_encoder;
7229 struct intel_digital_port *intel_dig_port;
7230
7231 intel_encoder = intel_attached_encoder(connector);
7232 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
7233 i = intel_dig_port->port;
7234 } else {
7235 i = I915_READ(aud_cntl_st);
7236 i = (i >> 29) & DIP_PORT_SEL_MASK;
7237 /* DIP_Port_Select, 0x1 = PortB */
7238 }
7239
7179 if (!i) { 7240 if (!i) {
7180 DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); 7241 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
7181 /* operate blindly on all ports */ 7242 /* operate blindly on all ports */
@@ -7319,7 +7380,7 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
7319 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 7380 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
7320 cntl |= CURSOR_MODE_DISABLE; 7381 cntl |= CURSOR_MODE_DISABLE;
7321 } 7382 }
7322 if (IS_HASWELL(dev)) { 7383 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
7323 cntl |= CURSOR_PIPE_CSC_ENABLE; 7384 cntl |= CURSOR_PIPE_CSC_ENABLE;
7324 cntl &= ~CURSOR_TRICKLE_FEED_DISABLE; 7385 cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
7325 } 7386 }
@@ -7375,7 +7436,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
7375 if (!visible && !intel_crtc->cursor_visible) 7436 if (!visible && !intel_crtc->cursor_visible)
7376 return; 7437 return;
7377 7438
7378 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 7439 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) {
7379 I915_WRITE(CURPOS_IVB(pipe), pos); 7440 I915_WRITE(CURPOS_IVB(pipe), pos);
7380 ivb_update_cursor(crtc, base); 7441 ivb_update_cursor(crtc, base);
7381 } else { 7442 } else {
@@ -10049,6 +10110,18 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
10049 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 10110 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
10050} 10111}
10051 10112
10113enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
10114{
10115 struct drm_encoder *encoder = connector->base.encoder;
10116
10117 WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
10118
10119 if (!encoder)
10120 return INVALID_PIPE;
10121
10122 return to_intel_crtc(encoder->crtc)->pipe;
10123}
10124
10052int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 10125int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
10053 struct drm_file *file) 10126 struct drm_file *file)
10054{ 10127{
@@ -10064,7 +10137,7 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
10064 10137
10065 if (!drmmode_obj) { 10138 if (!drmmode_obj) {
10066 DRM_ERROR("no such CRTC id\n"); 10139 DRM_ERROR("no such CRTC id\n");
10067 return -EINVAL; 10140 return -ENOENT;
10068 } 10141 }
10069 10142
10070 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); 10143 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
@@ -10500,7 +10573,7 @@ static void intel_init_display(struct drm_device *dev)
10500 dev_priv->display.write_eld = ironlake_write_eld; 10573 dev_priv->display.write_eld = ironlake_write_eld;
10501 dev_priv->display.modeset_global_resources = 10574 dev_priv->display.modeset_global_resources =
10502 ivb_modeset_global_resources; 10575 ivb_modeset_global_resources;
10503 } else if (IS_HASWELL(dev)) { 10576 } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
10504 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 10577 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
10505 dev_priv->display.write_eld = haswell_write_eld; 10578 dev_priv->display.write_eld = haswell_write_eld;
10506 dev_priv->display.modeset_global_resources = 10579 dev_priv->display.modeset_global_resources =
@@ -10511,6 +10584,7 @@ static void intel_init_display(struct drm_device *dev)
10511 } else if (IS_VALLEYVIEW(dev)) { 10584 } else if (IS_VALLEYVIEW(dev)) {
10512 dev_priv->display.modeset_global_resources = 10585 dev_priv->display.modeset_global_resources =
10513 valleyview_modeset_global_resources; 10586 valleyview_modeset_global_resources;
10587 dev_priv->display.write_eld = ironlake_write_eld;
10514 } 10588 }
10515 10589
10516 /* Default just returns -ENODEV to indicate unsupported */ 10590 /* Default just returns -ENODEV to indicate unsupported */
@@ -10534,9 +10608,12 @@ static void intel_init_display(struct drm_device *dev)
10534 dev_priv->display.queue_flip = intel_gen6_queue_flip; 10608 dev_priv->display.queue_flip = intel_gen6_queue_flip;
10535 break; 10609 break;
10536 case 7: 10610 case 7:
10611 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
10537 dev_priv->display.queue_flip = intel_gen7_queue_flip; 10612 dev_priv->display.queue_flip = intel_gen7_queue_flip;
10538 break; 10613 break;
10539 } 10614 }
10615
10616 intel_panel_init_backlight_funcs(dev);
10540} 10617}
10541 10618
10542/* 10619/*
@@ -10573,17 +10650,6 @@ static void quirk_invert_brightness(struct drm_device *dev)
10573 DRM_INFO("applying inverted panel brightness quirk\n"); 10650 DRM_INFO("applying inverted panel brightness quirk\n");
10574} 10651}
10575 10652
10576/*
10577 * Some machines (Dell XPS13) suffer broken backlight controls if
10578 * BLM_PCH_PWM_ENABLE is set.
10579 */
10580static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
10581{
10582 struct drm_i915_private *dev_priv = dev->dev_private;
10583 dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
10584 DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
10585}
10586
10587struct intel_quirk { 10653struct intel_quirk {
10588 int device; 10654 int device;
10589 int subsystem_vendor; 10655 int subsystem_vendor;
@@ -10643,11 +10709,6 @@ static struct intel_quirk intel_quirks[] = {
10643 * seem to use inverted backlight PWM. 10709 * seem to use inverted backlight PWM.
10644 */ 10710 */
10645 { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness }, 10711 { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
10646
10647 /* Dell XPS13 HD Sandy Bridge */
10648 { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
10649 /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
10650 { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
10651}; 10712};
10652 10713
10653static void intel_init_quirks(struct drm_device *dev) 10714static void intel_init_quirks(struct drm_device *dev)
@@ -11189,12 +11250,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
11189 /* flush any delayed tasks or pending work */ 11250 /* flush any delayed tasks or pending work */
11190 flush_scheduled_work(); 11251 flush_scheduled_work();
11191 11252
11192 /* destroy backlight, if any, before the connectors */ 11253 /* destroy the backlight and sysfs files before encoders/connectors */
11193 intel_panel_destroy_backlight(dev); 11254 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
11194 11255 intel_panel_destroy_backlight(connector);
11195 /* destroy the sysfs files before encoders/connectors */
11196 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
11197 drm_sysfs_connector_remove(connector); 11256 drm_sysfs_connector_remove(connector);
11257 }
11198 11258
11199 drm_mode_config_cleanup(dev); 11259 drm_mode_config_cleanup(dev);
11200 11260
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 34d605762a60..dbe4840d6fb8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -405,6 +405,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
405 uint32_t status; 405 uint32_t status;
406 int try, precharge, clock = 0; 406 int try, precharge, clock = 0;
407 bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); 407 bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
408 uint32_t timeout;
408 409
409 /* dp aux is extremely sensitive to irq latency, hence request the 410 /* dp aux is extremely sensitive to irq latency, hence request the
410 * lowest possible wakeup latency and so prevent the cpu from going into 411 * lowest possible wakeup latency and so prevent the cpu from going into
@@ -419,6 +420,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
419 else 420 else
420 precharge = 5; 421 precharge = 5;
421 422
423 if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
424 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
425 else
426 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
427
422 intel_aux_display_runtime_get(dev_priv); 428 intel_aux_display_runtime_get(dev_priv);
423 429
424 /* Try to wait for any previous AUX channel activity */ 430 /* Try to wait for any previous AUX channel activity */
@@ -454,7 +460,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
454 I915_WRITE(ch_ctl, 460 I915_WRITE(ch_ctl,
455 DP_AUX_CH_CTL_SEND_BUSY | 461 DP_AUX_CH_CTL_SEND_BUSY |
456 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | 462 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
457 DP_AUX_CH_CTL_TIME_OUT_400us | 463 timeout |
458 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 464 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
459 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 465 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
460 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | 466 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
@@ -747,7 +753,7 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
747 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 753 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
748 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 754 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
749 intel_dp->adapter.algo_data = &intel_dp->algo; 755 intel_dp->adapter.algo_data = &intel_dp->algo;
750 intel_dp->adapter.dev.parent = &intel_connector->base.kdev; 756 intel_dp->adapter.dev.parent = intel_connector->base.kdev;
751 757
752 ret = i2c_dp_aux_add_bus(&intel_dp->adapter); 758 ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
753 return ret; 759 return ret;
@@ -1249,7 +1255,6 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1249 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1255 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1250 struct drm_device *dev = intel_dig_port->base.base.dev; 1256 struct drm_device *dev = intel_dig_port->base.base.dev;
1251 struct drm_i915_private *dev_priv = dev->dev_private; 1257 struct drm_i915_private *dev_priv = dev->dev_private;
1252 int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
1253 u32 pp; 1258 u32 pp;
1254 u32 pp_ctrl_reg; 1259 u32 pp_ctrl_reg;
1255 1260
@@ -1272,7 +1277,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1272 I915_WRITE(pp_ctrl_reg, pp); 1277 I915_WRITE(pp_ctrl_reg, pp);
1273 POSTING_READ(pp_ctrl_reg); 1278 POSTING_READ(pp_ctrl_reg);
1274 1279
1275 intel_panel_enable_backlight(dev, pipe); 1280 intel_panel_enable_backlight(intel_dp->attached_connector);
1276} 1281}
1277 1282
1278void ironlake_edp_backlight_off(struct intel_dp *intel_dp) 1283void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1285,7 +1290,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1285 if (!is_edp(intel_dp)) 1290 if (!is_edp(intel_dp))
1286 return; 1291 return;
1287 1292
1288 intel_panel_disable_backlight(dev); 1293 intel_panel_disable_backlight(intel_dp->attached_connector);
1289 1294
1290 DRM_DEBUG_KMS("\n"); 1295 DRM_DEBUG_KMS("\n");
1291 pp = ironlake_get_pp_control(intel_dp); 1296 pp = ironlake_get_pp_control(intel_dp);
@@ -1611,6 +1616,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1611 uint32_t max_sleep_time = 0x1f; 1616 uint32_t max_sleep_time = 0x1f;
1612 uint32_t idle_frames = 1; 1617 uint32_t idle_frames = 1;
1613 uint32_t val = 0x0; 1618 uint32_t val = 0x0;
1619 const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
1614 1620
1615 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { 1621 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
1616 val |= EDP_PSR_LINK_STANDBY; 1622 val |= EDP_PSR_LINK_STANDBY;
@@ -1621,7 +1627,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1621 val |= EDP_PSR_LINK_DISABLE; 1627 val |= EDP_PSR_LINK_DISABLE;
1622 1628
1623 I915_WRITE(EDP_PSR_CTL(dev), val | 1629 I915_WRITE(EDP_PSR_CTL(dev), val |
1624 EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES | 1630 IS_BROADWELL(dev) ? 0 : link_entry_time |
1625 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | 1631 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1626 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | 1632 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
1627 EDP_PSR_ENABLE); 1633 EDP_PSR_ENABLE);
@@ -1958,7 +1964,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
1958 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1964 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1959 enum port port = dp_to_dig_port(intel_dp)->port; 1965 enum port port = dp_to_dig_port(intel_dp)->port;
1960 1966
1961 if (IS_VALLEYVIEW(dev)) 1967 if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
1962 return DP_TRAIN_VOLTAGE_SWING_1200; 1968 return DP_TRAIN_VOLTAGE_SWING_1200;
1963 else if (IS_GEN7(dev) && port == PORT_A) 1969 else if (IS_GEN7(dev) && port == PORT_A)
1964 return DP_TRAIN_VOLTAGE_SWING_800; 1970 return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1974,7 +1980,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1974 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1980 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1975 enum port port = dp_to_dig_port(intel_dp)->port; 1981 enum port port = dp_to_dig_port(intel_dp)->port;
1976 1982
1977 if (HAS_DDI(dev)) { 1983 if (IS_BROADWELL(dev)) {
1984 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1985 case DP_TRAIN_VOLTAGE_SWING_400:
1986 case DP_TRAIN_VOLTAGE_SWING_600:
1987 return DP_TRAIN_PRE_EMPHASIS_6;
1988 case DP_TRAIN_VOLTAGE_SWING_800:
1989 return DP_TRAIN_PRE_EMPHASIS_3_5;
1990 case DP_TRAIN_VOLTAGE_SWING_1200:
1991 default:
1992 return DP_TRAIN_PRE_EMPHASIS_0;
1993 }
1994 } else if (IS_HASWELL(dev)) {
1978 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1995 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1979 case DP_TRAIN_VOLTAGE_SWING_400: 1996 case DP_TRAIN_VOLTAGE_SWING_400:
1980 return DP_TRAIN_PRE_EMPHASIS_9_5; 1997 return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -2286,6 +2303,41 @@ intel_hsw_signal_levels(uint8_t train_set)
2286 } 2303 }
2287} 2304}
2288 2305
2306static uint32_t
2307intel_bdw_signal_levels(uint8_t train_set)
2308{
2309 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2310 DP_TRAIN_PRE_EMPHASIS_MASK);
2311 switch (signal_levels) {
2312 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2313 return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
2314 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2315 return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
2316 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2317 return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
2318
2319 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2320 return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
2321 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2322 return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
2323 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2324 return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
2325
2326 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2327 return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
2328 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2329 return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
2330
2331 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2332 return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
2333
2334 default:
2335 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2336 "0x%x\n", signal_levels);
2337 return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
2338 }
2339}
2340
2289/* Properly updates "DP" with the correct signal levels. */ 2341/* Properly updates "DP" with the correct signal levels. */
2290static void 2342static void
2291intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) 2343intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -2296,7 +2348,10 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2296 uint32_t signal_levels, mask; 2348 uint32_t signal_levels, mask;
2297 uint8_t train_set = intel_dp->train_set[0]; 2349 uint8_t train_set = intel_dp->train_set[0];
2298 2350
2299 if (HAS_DDI(dev)) { 2351 if (IS_BROADWELL(dev)) {
2352 signal_levels = intel_bdw_signal_levels(train_set);
2353 mask = DDI_BUF_EMP_MASK;
2354 } else if (IS_HASWELL(dev)) {
2300 signal_levels = intel_hsw_signal_levels(train_set); 2355 signal_levels = intel_hsw_signal_levels(train_set);
2301 mask = DDI_BUF_EMP_MASK; 2356 mask = DDI_BUF_EMP_MASK;
2302 } else if (IS_VALLEYVIEW(dev)) { 2357 } else if (IS_VALLEYVIEW(dev)) {
@@ -3284,7 +3339,8 @@ bool intel_dpd_is_edp(struct drm_device *dev)
3284 p_child = dev_priv->vbt.child_dev + i; 3339 p_child = dev_priv->vbt.child_dev + i;
3285 3340
3286 if (p_child->common.dvo_port == PORT_IDPD && 3341 if (p_child->common.dvo_port == PORT_IDPD &&
3287 p_child->common.device_type == DEVICE_TYPE_eDP) 3342 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
3343 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
3288 return true; 3344 return true;
3289 } 3345 }
3290 return false; 3346 return false;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9134a5464dd5..02312810374d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -156,6 +156,17 @@ struct intel_encoder {
156struct intel_panel { 156struct intel_panel {
157 struct drm_display_mode *fixed_mode; 157 struct drm_display_mode *fixed_mode;
158 int fitting_mode; 158 int fitting_mode;
159
160 /* backlight */
161 struct {
162 bool present;
163 u32 level;
164 u32 max;
165 bool enabled;
166 bool combination_mode; /* gen 2/4 only */
167 bool active_low_pwm;
168 struct backlight_device *device;
169 } backlight;
159}; 170};
160 171
161struct intel_connector { 172struct intel_connector {
@@ -630,6 +641,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
630struct drm_encoder *intel_best_encoder(struct drm_connector *connector); 641struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
631struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 642struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
632 struct drm_crtc *crtc); 643 struct drm_crtc *crtc);
644enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
633int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 645int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
634 struct drm_file *file_priv); 646 struct drm_file *file_priv);
635enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 647enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -803,11 +815,13 @@ void intel_pch_panel_fitting(struct intel_crtc *crtc,
803void intel_gmch_panel_fitting(struct intel_crtc *crtc, 815void intel_gmch_panel_fitting(struct intel_crtc *crtc,
804 struct intel_crtc_config *pipe_config, 816 struct intel_crtc_config *pipe_config,
805 int fitting_mode); 817 int fitting_mode);
806void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max); 818void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
819 u32 max);
807int intel_panel_setup_backlight(struct drm_connector *connector); 820int intel_panel_setup_backlight(struct drm_connector *connector);
808void intel_panel_enable_backlight(struct drm_device *dev, enum pipe pipe); 821void intel_panel_enable_backlight(struct intel_connector *connector);
809void intel_panel_disable_backlight(struct drm_device *dev); 822void intel_panel_disable_backlight(struct intel_connector *connector);
810void intel_panel_destroy_backlight(struct drm_device *dev); 823void intel_panel_destroy_backlight(struct drm_connector *connector);
824void intel_panel_init_backlight_funcs(struct drm_device *dev);
811enum drm_connector_status intel_panel_detect(struct drm_device *dev); 825enum drm_connector_status intel_panel_detect(struct drm_device *dev);
812 826
813 827
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 61cff670ff3f..d0c81b170149 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -847,7 +847,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
847 847
848 if (IS_G4X(dev)) 848 if (IS_G4X(dev))
849 return 165000; 849 return 165000;
850 else if (IS_HASWELL(dev)) 850 else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
851 return 300000; 851 return 300000;
852 else 852 else
853 return 225000; 853 return 225000;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b0ef55833087..c3b4da7895ed 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -206,7 +206,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
206{ 206{
207 struct drm_device *dev = encoder->base.dev; 207 struct drm_device *dev = encoder->base.dev;
208 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 208 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
209 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 209 struct intel_connector *intel_connector =
210 &lvds_encoder->attached_connector->base;
210 struct drm_i915_private *dev_priv = dev->dev_private; 211 struct drm_i915_private *dev_priv = dev->dev_private;
211 u32 ctl_reg, stat_reg; 212 u32 ctl_reg, stat_reg;
212 213
@@ -225,13 +226,15 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
225 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) 226 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
226 DRM_ERROR("timed out waiting for panel to power on\n"); 227 DRM_ERROR("timed out waiting for panel to power on\n");
227 228
228 intel_panel_enable_backlight(dev, intel_crtc->pipe); 229 intel_panel_enable_backlight(intel_connector);
229} 230}
230 231
231static void intel_disable_lvds(struct intel_encoder *encoder) 232static void intel_disable_lvds(struct intel_encoder *encoder)
232{ 233{
233 struct drm_device *dev = encoder->base.dev; 234 struct drm_device *dev = encoder->base.dev;
234 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 235 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
236 struct intel_connector *intel_connector =
237 &lvds_encoder->attached_connector->base;
235 struct drm_i915_private *dev_priv = dev->dev_private; 238 struct drm_i915_private *dev_priv = dev->dev_private;
236 u32 ctl_reg, stat_reg; 239 u32 ctl_reg, stat_reg;
237 240
@@ -243,7 +246,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
243 stat_reg = PP_STATUS; 246 stat_reg = PP_STATUS;
244 } 247 }
245 248
246 intel_panel_disable_backlight(dev); 249 intel_panel_disable_backlight(intel_connector);
247 250
248 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); 251 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
249 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) 252 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index b82050c96f3e..6506df26ac9e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -396,6 +396,9 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
396static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 396static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
397{ 397{
398 struct drm_i915_private *dev_priv = dev->dev_private; 398 struct drm_i915_private *dev_priv = dev->dev_private;
399 struct drm_connector *connector;
400 struct intel_connector *intel_connector;
401 struct intel_panel *panel;
399 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 402 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
400 403
401 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); 404 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
@@ -407,10 +410,24 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
407 if (bclp > 255) 410 if (bclp > 255)
408 return ASLC_BACKLIGHT_FAILED; 411 return ASLC_BACKLIGHT_FAILED;
409 412
413 mutex_lock(&dev->mode_config.mutex);
414
415 /*
416 * Update backlight on all connectors that support backlight (usually
417 * only one).
418 */
410 DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); 419 DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
411 intel_panel_set_backlight(dev, bclp, 255); 420 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
421 intel_connector = to_intel_connector(connector);
422 panel = &intel_connector->panel;
423 if (panel->backlight.present)
424 intel_panel_set_backlight(intel_connector, bclp, 255);
425 }
412 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); 426 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
413 427
428 mutex_unlock(&dev->mode_config.mutex);
429
430
414 return 0; 431 return 0;
415} 432}
416 433
@@ -486,9 +503,13 @@ static u32 asle_isct_state(struct drm_device *dev)
486 return ASLC_ISCT_STATE_FAILED; 503 return ASLC_ISCT_STATE_FAILED;
487} 504}
488 505
489void intel_opregion_asle_intr(struct drm_device *dev) 506static void asle_work(struct work_struct *work)
490{ 507{
491 struct drm_i915_private *dev_priv = dev->dev_private; 508 struct intel_opregion *opregion =
509 container_of(work, struct intel_opregion, asle_work);
510 struct drm_i915_private *dev_priv =
511 container_of(opregion, struct drm_i915_private, opregion);
512 struct drm_device *dev = dev_priv->dev;
492 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 513 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
493 u32 aslc_stat = 0; 514 u32 aslc_stat = 0;
494 u32 aslc_req; 515 u32 aslc_req;
@@ -535,6 +556,14 @@ void intel_opregion_asle_intr(struct drm_device *dev)
535 iowrite32(aslc_stat, &asle->aslc); 556 iowrite32(aslc_stat, &asle->aslc);
536} 557}
537 558
559void intel_opregion_asle_intr(struct drm_device *dev)
560{
561 struct drm_i915_private *dev_priv = dev->dev_private;
562
563 if (dev_priv->opregion.asle)
564 schedule_work(&dev_priv->opregion.asle_work);
565}
566
538#define ACPI_EV_DISPLAY_SWITCH (1<<0) 567#define ACPI_EV_DISPLAY_SWITCH (1<<0)
539#define ACPI_EV_LID (1<<1) 568#define ACPI_EV_LID (1<<1)
540#define ACPI_EV_DOCK (1<<2) 569#define ACPI_EV_DOCK (1<<2)
@@ -735,6 +764,8 @@ void intel_opregion_fini(struct drm_device *dev)
735 if (opregion->asle) 764 if (opregion->asle)
736 iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy); 765 iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
737 766
767 cancel_work_sync(&dev_priv->opregion.asle_work);
768
738 if (opregion->acpi) { 769 if (opregion->acpi) {
739 iowrite32(0, &opregion->acpi->drdy); 770 iowrite32(0, &opregion->acpi->drdy);
740 771
@@ -828,6 +859,10 @@ int intel_opregion_setup(struct drm_device *dev)
828 return -ENOTSUPP; 859 return -ENOTSUPP;
829 } 860 }
830 861
862#ifdef CONFIG_ACPI
863 INIT_WORK(&opregion->asle_work, asle_work);
864#endif
865
831 base = acpi_os_ioremap(asls, OPREGION_SIZE); 866 base = acpi_os_ioremap(asls, OPREGION_SIZE);
832 if (!base) 867 if (!base)
833 return -ENOMEM; 868 return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index de1518614827..e480cf41c536 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -325,214 +325,263 @@ out:
325 pipe_config->gmch_pfit.lvds_border_bits = border; 325 pipe_config->gmch_pfit.lvds_border_bits = border;
326} 326}
327 327
328static int is_backlight_combination_mode(struct drm_device *dev) 328static int i915_panel_invert_brightness;
329MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
330 "(-1 force normal, 0 machine defaults, 1 force inversion), please "
331 "report PCI device ID, subsystem vendor and subsystem device ID "
332 "to dri-devel@lists.freedesktop.org, if your machine needs it. "
333 "It will then be included in an upcoming module version.");
334module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
335static u32 intel_panel_compute_brightness(struct intel_connector *connector,
336 u32 val)
329{ 337{
338 struct drm_device *dev = connector->base.dev;
330 struct drm_i915_private *dev_priv = dev->dev_private; 339 struct drm_i915_private *dev_priv = dev->dev_private;
340 struct intel_panel *panel = &connector->panel;
331 341
332 if (IS_GEN4(dev)) 342 WARN_ON(panel->backlight.max == 0);
333 return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
334 343
335 if (IS_GEN2(dev)) 344 if (i915_panel_invert_brightness < 0)
336 return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; 345 return val;
337 346
338 return 0; 347 if (i915_panel_invert_brightness > 0 ||
348 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
349 return panel->backlight.max - val;
350 }
351
352 return val;
339} 353}
340 354
341/* XXX: query mode clock or hardware clock and program max PWM appropriately 355static u32 bdw_get_backlight(struct intel_connector *connector)
342 * when it's 0.
343 */
344static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
345{ 356{
357 struct drm_device *dev = connector->base.dev;
346 struct drm_i915_private *dev_priv = dev->dev_private; 358 struct drm_i915_private *dev_priv = dev->dev_private;
347 u32 val;
348
349 WARN_ON_SMP(!spin_is_locked(&dev_priv->backlight.lock));
350 359
351 /* Restore the CTL value if it lost, e.g. GPU reset */ 360 return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
352 361}
353 if (HAS_PCH_SPLIT(dev_priv->dev)) {
354 val = I915_READ(BLC_PWM_PCH_CTL2);
355 if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
356 dev_priv->regfile.saveBLC_PWM_CTL2 = val;
357 } else if (val == 0) {
358 val = dev_priv->regfile.saveBLC_PWM_CTL2;
359 I915_WRITE(BLC_PWM_PCH_CTL2, val);
360 }
361 } else {
362 val = I915_READ(BLC_PWM_CTL);
363 if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
364 dev_priv->regfile.saveBLC_PWM_CTL = val;
365 if (INTEL_INFO(dev)->gen >= 4)
366 dev_priv->regfile.saveBLC_PWM_CTL2 =
367 I915_READ(BLC_PWM_CTL2);
368 } else if (val == 0) {
369 val = dev_priv->regfile.saveBLC_PWM_CTL;
370 I915_WRITE(BLC_PWM_CTL, val);
371 if (INTEL_INFO(dev)->gen >= 4)
372 I915_WRITE(BLC_PWM_CTL2,
373 dev_priv->regfile.saveBLC_PWM_CTL2);
374 }
375 362
376 if (IS_VALLEYVIEW(dev) && !val) 363static u32 pch_get_backlight(struct intel_connector *connector)
377 val = 0x0f42ffff; 364{
378 } 365 struct drm_device *dev = connector->base.dev;
366 struct drm_i915_private *dev_priv = dev->dev_private;
379 367
380 return val; 368 return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
381} 369}
382 370
383static u32 intel_panel_get_max_backlight(struct drm_device *dev) 371static u32 i9xx_get_backlight(struct intel_connector *connector)
384{ 372{
385 u32 max; 373 struct drm_device *dev = connector->base.dev;
374 struct drm_i915_private *dev_priv = dev->dev_private;
375 struct intel_panel *panel = &connector->panel;
376 u32 val;
386 377
387 max = i915_read_blc_pwm_ctl(dev); 378 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
379 if (INTEL_INFO(dev)->gen < 4)
380 val >>= 1;
388 381
389 if (HAS_PCH_SPLIT(dev)) { 382 if (panel->backlight.combination_mode) {
390 max >>= 16; 383 u8 lbpc;
391 } else {
392 if (INTEL_INFO(dev)->gen < 4)
393 max >>= 17;
394 else
395 max >>= 16;
396 384
397 if (is_backlight_combination_mode(dev)) 385 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
398 max *= 0xff; 386 val *= lbpc;
399 } 387 }
400 388
401 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); 389 return val;
402
403 return max;
404} 390}
405 391
406static int i915_panel_invert_brightness; 392static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
407MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
408 "(-1 force normal, 0 machine defaults, 1 force inversion), please "
409 "report PCI device ID, subsystem vendor and subsystem device ID "
410 "to dri-devel@lists.freedesktop.org, if your machine needs it. "
411 "It will then be included in an upcoming module version.");
412module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
413static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
414{ 393{
415 struct drm_i915_private *dev_priv = dev->dev_private; 394 struct drm_i915_private *dev_priv = dev->dev_private;
416 395
417 if (i915_panel_invert_brightness < 0) 396 return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
418 return val; 397}
419 398
420 if (i915_panel_invert_brightness > 0 || 399static u32 vlv_get_backlight(struct intel_connector *connector)
421 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { 400{
422 u32 max = intel_panel_get_max_backlight(dev); 401 struct drm_device *dev = connector->base.dev;
423 if (max) 402 enum pipe pipe = intel_get_pipe_from_connector(connector);
424 return max - val;
425 }
426 403
427 return val; 404 return _vlv_get_backlight(dev, pipe);
428} 405}
429 406
430static u32 intel_panel_get_backlight(struct drm_device *dev) 407static u32 intel_panel_get_backlight(struct intel_connector *connector)
431{ 408{
409 struct drm_device *dev = connector->base.dev;
432 struct drm_i915_private *dev_priv = dev->dev_private; 410 struct drm_i915_private *dev_priv = dev->dev_private;
433 u32 val; 411 u32 val;
434 unsigned long flags; 412 unsigned long flags;
435 413
436 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 414 spin_lock_irqsave(&dev_priv->backlight_lock, flags);
437
438 if (HAS_PCH_SPLIT(dev)) {
439 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
440 } else {
441 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
442 if (INTEL_INFO(dev)->gen < 4)
443 val >>= 1;
444
445 if (is_backlight_combination_mode(dev)) {
446 u8 lbpc;
447
448 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
449 val *= lbpc;
450 }
451 }
452 415
453 val = intel_panel_compute_brightness(dev, val); 416 val = dev_priv->display.get_backlight(connector);
417 val = intel_panel_compute_brightness(connector, val);
454 418
455 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 419 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
456 420
457 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); 421 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
458 return val; 422 return val;
459} 423}
460 424
461static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) 425static void bdw_set_backlight(struct intel_connector *connector, u32 level)
462{ 426{
427 struct drm_device *dev = connector->base.dev;
463 struct drm_i915_private *dev_priv = dev->dev_private; 428 struct drm_i915_private *dev_priv = dev->dev_private;
464 u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; 429 u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
465 I915_WRITE(BLC_PWM_CPU_CTL, val | level); 430 I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
466} 431}
467 432
468static void intel_panel_actually_set_backlight(struct drm_device *dev, 433static void pch_set_backlight(struct intel_connector *connector, u32 level)
469 u32 level)
470{ 434{
435 struct drm_device *dev = connector->base.dev;
471 struct drm_i915_private *dev_priv = dev->dev_private; 436 struct drm_i915_private *dev_priv = dev->dev_private;
472 u32 tmp; 437 u32 tmp;
473 438
474 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); 439 tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
475 level = intel_panel_compute_brightness(dev, level); 440 I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
441}
442
443static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
444{
445 struct drm_device *dev = connector->base.dev;
446 struct drm_i915_private *dev_priv = dev->dev_private;
447 struct intel_panel *panel = &connector->panel;
448 u32 tmp, mask;
476 449
477 if (HAS_PCH_SPLIT(dev)) 450 WARN_ON(panel->backlight.max == 0);
478 return intel_pch_panel_set_backlight(dev, level);
479 451
480 if (is_backlight_combination_mode(dev)) { 452 if (panel->backlight.combination_mode) {
481 u32 max = intel_panel_get_max_backlight(dev);
482 u8 lbpc; 453 u8 lbpc;
483 454
484 /* we're screwed, but keep behaviour backwards compatible */ 455 lbpc = level * 0xfe / panel->backlight.max + 1;
485 if (!max)
486 max = 1;
487
488 lbpc = level * 0xfe / max + 1;
489 level /= lbpc; 456 level /= lbpc;
490 pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); 457 pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
491 } 458 }
492 459
493 tmp = I915_READ(BLC_PWM_CTL); 460 if (IS_GEN4(dev)) {
494 if (INTEL_INFO(dev)->gen < 4) 461 mask = BACKLIGHT_DUTY_CYCLE_MASK;
462 } else {
495 level <<= 1; 463 level <<= 1;
496 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; 464 mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
465 }
466
467 tmp = I915_READ(BLC_PWM_CTL) & ~mask;
497 I915_WRITE(BLC_PWM_CTL, tmp | level); 468 I915_WRITE(BLC_PWM_CTL, tmp | level);
498} 469}
499 470
471static void vlv_set_backlight(struct intel_connector *connector, u32 level)
472{
473 struct drm_device *dev = connector->base.dev;
474 struct drm_i915_private *dev_priv = dev->dev_private;
475 enum pipe pipe = intel_get_pipe_from_connector(connector);
476 u32 tmp;
477
478 tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
479 I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
480}
481
482static void
483intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
484{
485 struct drm_device *dev = connector->base.dev;
486 struct drm_i915_private *dev_priv = dev->dev_private;
487
488 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
489
490 level = intel_panel_compute_brightness(connector, level);
491 dev_priv->display.set_backlight(connector, level);
492}
493
500/* set backlight brightness to level in range [0..max] */ 494/* set backlight brightness to level in range [0..max] */
501void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max) 495void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
496 u32 max)
502{ 497{
498 struct drm_device *dev = connector->base.dev;
503 struct drm_i915_private *dev_priv = dev->dev_private; 499 struct drm_i915_private *dev_priv = dev->dev_private;
500 struct intel_panel *panel = &connector->panel;
501 enum pipe pipe = intel_get_pipe_from_connector(connector);
504 u32 freq; 502 u32 freq;
505 unsigned long flags; 503 unsigned long flags;
506 504
507 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 505 if (pipe == INVALID_PIPE)
506 return;
508 507
509 freq = intel_panel_get_max_backlight(dev); 508 spin_lock_irqsave(&dev_priv->backlight_lock, flags);
510 if (!freq) { 509
511 /* we are screwed, bail out */ 510 WARN_ON(panel->backlight.max == 0);
512 goto out;
513 }
514 511
515 /* scale to hardware, but be careful to not overflow */ 512 /* scale to hardware max, but be careful to not overflow */
513 freq = panel->backlight.max;
516 if (freq < max) 514 if (freq < max)
517 level = level * freq / max; 515 level = level * freq / max;
518 else 516 else
519 level = freq / max * level; 517 level = freq / max * level;
520 518
521 dev_priv->backlight.level = level; 519 panel->backlight.level = level;
522 if (dev_priv->backlight.device) 520 if (panel->backlight.device)
523 dev_priv->backlight.device->props.brightness = level; 521 panel->backlight.device->props.brightness = level;
524 522
525 if (dev_priv->backlight.enabled) 523 if (panel->backlight.enabled)
526 intel_panel_actually_set_backlight(dev, level); 524 intel_panel_actually_set_backlight(connector, level);
527out: 525
528 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 526 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
527}
528
529static void pch_disable_backlight(struct intel_connector *connector)
530{
531 struct drm_device *dev = connector->base.dev;
532 struct drm_i915_private *dev_priv = dev->dev_private;
533 u32 tmp;
534
535 intel_panel_actually_set_backlight(connector, 0);
536
537 tmp = I915_READ(BLC_PWM_CPU_CTL2);
538 I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
539
540 tmp = I915_READ(BLC_PWM_PCH_CTL1);
541 I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
542}
543
544static void i9xx_disable_backlight(struct intel_connector *connector)
545{
546 intel_panel_actually_set_backlight(connector, 0);
529} 547}
530 548
531void intel_panel_disable_backlight(struct drm_device *dev) 549static void i965_disable_backlight(struct intel_connector *connector)
532{ 550{
551 struct drm_device *dev = connector->base.dev;
533 struct drm_i915_private *dev_priv = dev->dev_private; 552 struct drm_i915_private *dev_priv = dev->dev_private;
553 u32 tmp;
554
555 intel_panel_actually_set_backlight(connector, 0);
556
557 tmp = I915_READ(BLC_PWM_CTL2);
558 I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
559}
560
561static void vlv_disable_backlight(struct intel_connector *connector)
562{
563 struct drm_device *dev = connector->base.dev;
564 struct drm_i915_private *dev_priv = dev->dev_private;
565 enum pipe pipe = intel_get_pipe_from_connector(connector);
566 u32 tmp;
567
568 intel_panel_actually_set_backlight(connector, 0);
569
570 tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
571 I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
572}
573
574void intel_panel_disable_backlight(struct intel_connector *connector)
575{
576 struct drm_device *dev = connector->base.dev;
577 struct drm_i915_private *dev_priv = dev->dev_private;
578 struct intel_panel *panel = &connector->panel;
579 enum pipe pipe = intel_get_pipe_from_connector(connector);
534 unsigned long flags; 580 unsigned long flags;
535 581
582 if (pipe == INVALID_PIPE)
583 return;
584
536 /* 585 /*
537 * Do not disable backlight on the vgaswitcheroo path. When switching 586 * Do not disable backlight on the vgaswitcheroo path. When switching
538 * away from i915, the other client may depend on i915 to handle the 587 * away from i915, the other client may depend on i915 to handle the
@@ -544,116 +593,215 @@ void intel_panel_disable_backlight(struct drm_device *dev)
544 return; 593 return;
545 } 594 }
546 595
547 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 596 spin_lock_irqsave(&dev_priv->backlight_lock, flags);
548 597
549 dev_priv->backlight.enabled = false; 598 panel->backlight.enabled = false;
550 intel_panel_actually_set_backlight(dev, 0); 599 dev_priv->display.disable_backlight(connector);
551 600
552 if (INTEL_INFO(dev)->gen >= 4) { 601 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
553 uint32_t reg, tmp; 602}
603
604static void bdw_enable_backlight(struct intel_connector *connector)
605{
606 struct drm_device *dev = connector->base.dev;
607 struct drm_i915_private *dev_priv = dev->dev_private;
608 struct intel_panel *panel = &connector->panel;
609 u32 pch_ctl1, pch_ctl2;
610
611 pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
612 if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
613 DRM_DEBUG_KMS("pch backlight already enabled\n");
614 pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
615 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
616 }
554 617
555 reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; 618 pch_ctl2 = panel->backlight.max << 16;
619 I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
556 620
557 I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE); 621 pch_ctl1 = 0;
622 if (panel->backlight.active_low_pwm)
623 pch_ctl1 |= BLM_PCH_POLARITY;
558 624
559 if (HAS_PCH_SPLIT(dev)) { 625 /* BDW always uses the pch pwm controls. */
560 tmp = I915_READ(BLC_PWM_PCH_CTL1); 626 pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
561 tmp &= ~BLM_PCH_PWM_ENABLE; 627
562 I915_WRITE(BLC_PWM_PCH_CTL1, tmp); 628 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
563 } 629 POSTING_READ(BLC_PWM_PCH_CTL1);
564 } 630 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
565 631
566 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 632 /* This won't stick until the above enable. */
633 intel_panel_actually_set_backlight(connector, panel->backlight.level);
567} 634}
568 635
569void intel_panel_enable_backlight(struct drm_device *dev, 636static void pch_enable_backlight(struct intel_connector *connector)
570 enum pipe pipe)
571{ 637{
638 struct drm_device *dev = connector->base.dev;
572 struct drm_i915_private *dev_priv = dev->dev_private; 639 struct drm_i915_private *dev_priv = dev->dev_private;
640 struct intel_panel *panel = &connector->panel;
641 enum pipe pipe = intel_get_pipe_from_connector(connector);
573 enum transcoder cpu_transcoder = 642 enum transcoder cpu_transcoder =
574 intel_pipe_to_cpu_transcoder(dev_priv, pipe); 643 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
575 unsigned long flags; 644 u32 cpu_ctl2, pch_ctl1, pch_ctl2;
576
577 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
578 645
579 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 646 cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
647 if (cpu_ctl2 & BLM_PWM_ENABLE) {
648 WARN(1, "cpu backlight already enabled\n");
649 cpu_ctl2 &= ~BLM_PWM_ENABLE;
650 I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
651 }
580 652
581 if (dev_priv->backlight.level == 0) { 653 pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
582 dev_priv->backlight.level = intel_panel_get_max_backlight(dev); 654 if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
583 if (dev_priv->backlight.device) 655 DRM_DEBUG_KMS("pch backlight already enabled\n");
584 dev_priv->backlight.device->props.brightness = 656 pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
585 dev_priv->backlight.level; 657 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
586 } 658 }
587 659
588 if (INTEL_INFO(dev)->gen >= 4) { 660 if (cpu_transcoder == TRANSCODER_EDP)
589 uint32_t reg, tmp; 661 cpu_ctl2 = BLM_TRANSCODER_EDP;
662 else
663 cpu_ctl2 = BLM_PIPE(cpu_transcoder);
664 I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
665 POSTING_READ(BLC_PWM_CPU_CTL2);
666 I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
590 667
591 reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; 668 /* This won't stick until the above enable. */
669 intel_panel_actually_set_backlight(connector, panel->backlight.level);
592 670
671 pch_ctl2 = panel->backlight.max << 16;
672 I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
593 673
594 tmp = I915_READ(reg); 674 pch_ctl1 = 0;
675 if (panel->backlight.active_low_pwm)
676 pch_ctl1 |= BLM_PCH_POLARITY;
595 677
596 /* Note that this can also get called through dpms changes. And 678 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
597 * we don't track the backlight dpms state, hence check whether 679 POSTING_READ(BLC_PWM_PCH_CTL1);
598 * we have to do anything first. */ 680 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
599 if (tmp & BLM_PWM_ENABLE) 681}
600 goto set_level;
601 682
602 if (INTEL_INFO(dev)->num_pipes == 3) 683static void i9xx_enable_backlight(struct intel_connector *connector)
603 tmp &= ~BLM_PIPE_SELECT_IVB; 684{
604 else 685 struct drm_device *dev = connector->base.dev;
605 tmp &= ~BLM_PIPE_SELECT; 686 struct drm_i915_private *dev_priv = dev->dev_private;
687 struct intel_panel *panel = &connector->panel;
688 u32 ctl, freq;
606 689
607 if (cpu_transcoder == TRANSCODER_EDP) 690 ctl = I915_READ(BLC_PWM_CTL);
608 tmp |= BLM_TRANSCODER_EDP; 691 if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
609 else 692 WARN(1, "backlight already enabled\n");
610 tmp |= BLM_PIPE(cpu_transcoder); 693 I915_WRITE(BLC_PWM_CTL, 0);
611 tmp &= ~BLM_PWM_ENABLE;
612
613 I915_WRITE(reg, tmp);
614 POSTING_READ(reg);
615 I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
616
617 if (HAS_PCH_SPLIT(dev) &&
618 !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
619 tmp = I915_READ(BLC_PWM_PCH_CTL1);
620 tmp |= BLM_PCH_PWM_ENABLE;
621 tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
622 I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
623 }
624 } 694 }
625 695
626set_level: 696 freq = panel->backlight.max;
627 /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1. 697 if (panel->backlight.combination_mode)
628 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these 698 freq /= 0xff;
629 * registers are set. 699
630 */ 700 ctl = freq << 17;
631 dev_priv->backlight.enabled = true; 701 if (IS_GEN2(dev) && panel->backlight.combination_mode)
632 intel_panel_actually_set_backlight(dev, dev_priv->backlight.level); 702 ctl |= BLM_LEGACY_MODE;
703 if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
704 ctl |= BLM_POLARITY_PNV;
633 705
634 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 706 I915_WRITE(BLC_PWM_CTL, ctl);
707 POSTING_READ(BLC_PWM_CTL);
708
709 /* XXX: combine this into above write? */
710 intel_panel_actually_set_backlight(connector, panel->backlight.level);
635} 711}
636 712
637/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */ 713static void i965_enable_backlight(struct intel_connector *connector)
638static void intel_panel_init_backlight_regs(struct drm_device *dev)
639{ 714{
715 struct drm_device *dev = connector->base.dev;
640 struct drm_i915_private *dev_priv = dev->dev_private; 716 struct drm_i915_private *dev_priv = dev->dev_private;
717 struct intel_panel *panel = &connector->panel;
718 enum pipe pipe = intel_get_pipe_from_connector(connector);
719 u32 ctl, ctl2, freq;
720
721 ctl2 = I915_READ(BLC_PWM_CTL2);
722 if (ctl2 & BLM_PWM_ENABLE) {
723 WARN(1, "backlight already enabled\n");
724 ctl2 &= ~BLM_PWM_ENABLE;
725 I915_WRITE(BLC_PWM_CTL2, ctl2);
726 }
727
728 freq = panel->backlight.max;
729 if (panel->backlight.combination_mode)
730 freq /= 0xff;
731
732 ctl = freq << 16;
733 I915_WRITE(BLC_PWM_CTL, ctl);
641 734
642 if (IS_VALLEYVIEW(dev)) { 735 /* XXX: combine this into above write? */
643 u32 cur_val = I915_READ(BLC_PWM_CTL) & 736 intel_panel_actually_set_backlight(connector, panel->backlight.level);
644 BACKLIGHT_DUTY_CYCLE_MASK; 737
645 I915_WRITE(BLC_PWM_CTL, (0xf42 << 16) | cur_val); 738 ctl2 = BLM_PIPE(pipe);
739 if (panel->backlight.combination_mode)
740 ctl2 |= BLM_COMBINATION_MODE;
741 if (panel->backlight.active_low_pwm)
742 ctl2 |= BLM_POLARITY_I965;
743 I915_WRITE(BLC_PWM_CTL2, ctl2);
744 POSTING_READ(BLC_PWM_CTL2);
745 I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
746}
747
748static void vlv_enable_backlight(struct intel_connector *connector)
749{
750 struct drm_device *dev = connector->base.dev;
751 struct drm_i915_private *dev_priv = dev->dev_private;
752 struct intel_panel *panel = &connector->panel;
753 enum pipe pipe = intel_get_pipe_from_connector(connector);
754 u32 ctl, ctl2;
755
756 ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
757 if (ctl2 & BLM_PWM_ENABLE) {
758 WARN(1, "backlight already enabled\n");
759 ctl2 &= ~BLM_PWM_ENABLE;
760 I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
646 } 761 }
762
763 ctl = panel->backlight.max << 16;
764 I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
765
766 /* XXX: combine this into above write? */
767 intel_panel_actually_set_backlight(connector, panel->backlight.level);
768
769 ctl2 = 0;
770 if (panel->backlight.active_low_pwm)
771 ctl2 |= BLM_POLARITY_I965;
772 I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
773 POSTING_READ(VLV_BLC_PWM_CTL2(pipe));
774 I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
647} 775}
648 776
649static void intel_panel_init_backlight(struct drm_device *dev) 777void intel_panel_enable_backlight(struct intel_connector *connector)
650{ 778{
779 struct drm_device *dev = connector->base.dev;
651 struct drm_i915_private *dev_priv = dev->dev_private; 780 struct drm_i915_private *dev_priv = dev->dev_private;
781 struct intel_panel *panel = &connector->panel;
782 enum pipe pipe = intel_get_pipe_from_connector(connector);
783 unsigned long flags;
784
785 if (pipe == INVALID_PIPE)
786 return;
787
788 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
789
790 spin_lock_irqsave(&dev_priv->backlight_lock, flags);
791
792 WARN_ON(panel->backlight.max == 0);
793
794 if (panel->backlight.level == 0) {
795 panel->backlight.level = panel->backlight.max;
796 if (panel->backlight.device)
797 panel->backlight.device->props.brightness =
798 panel->backlight.level;
799 }
652 800
653 intel_panel_init_backlight_regs(dev); 801 dev_priv->display.enable_backlight(connector);
802 panel->backlight.enabled = true;
654 803
655 dev_priv->backlight.level = intel_panel_get_backlight(dev); 804 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
656 dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
657} 805}
658 806
659enum drm_connector_status 807enum drm_connector_status
@@ -679,85 +827,320 @@ intel_panel_detect(struct drm_device *dev)
679} 827}
680 828
681#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) 829#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
682static int intel_panel_update_status(struct backlight_device *bd) 830static int intel_backlight_device_update_status(struct backlight_device *bd)
683{ 831{
684 struct drm_device *dev = bl_get_data(bd); 832 struct intel_connector *connector = bl_get_data(bd);
833 struct drm_device *dev = connector->base.dev;
834
835 mutex_lock(&dev->mode_config.mutex);
685 DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n", 836 DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
686 bd->props.brightness, bd->props.max_brightness); 837 bd->props.brightness, bd->props.max_brightness);
687 intel_panel_set_backlight(dev, bd->props.brightness, 838 intel_panel_set_backlight(connector, bd->props.brightness,
688 bd->props.max_brightness); 839 bd->props.max_brightness);
840 mutex_unlock(&dev->mode_config.mutex);
689 return 0; 841 return 0;
690} 842}
691 843
692static int intel_panel_get_brightness(struct backlight_device *bd) 844static int intel_backlight_device_get_brightness(struct backlight_device *bd)
693{ 845{
694 struct drm_device *dev = bl_get_data(bd); 846 struct intel_connector *connector = bl_get_data(bd);
695 return intel_panel_get_backlight(dev); 847 struct drm_device *dev = connector->base.dev;
848 int ret;
849
850 mutex_lock(&dev->mode_config.mutex);
851 ret = intel_panel_get_backlight(connector);
852 mutex_unlock(&dev->mode_config.mutex);
853
854 return ret;
696} 855}
697 856
698static const struct backlight_ops intel_panel_bl_ops = { 857static const struct backlight_ops intel_backlight_device_ops = {
699 .update_status = intel_panel_update_status, 858 .update_status = intel_backlight_device_update_status,
700 .get_brightness = intel_panel_get_brightness, 859 .get_brightness = intel_backlight_device_get_brightness,
701}; 860};
702 861
703int intel_panel_setup_backlight(struct drm_connector *connector) 862static int intel_backlight_device_register(struct intel_connector *connector)
704{ 863{
705 struct drm_device *dev = connector->dev; 864 struct intel_panel *panel = &connector->panel;
706 struct drm_i915_private *dev_priv = dev->dev_private;
707 struct backlight_properties props; 865 struct backlight_properties props;
708 unsigned long flags;
709 866
710 intel_panel_init_backlight(dev); 867 if (WARN_ON(panel->backlight.device))
711
712 if (WARN_ON(dev_priv->backlight.device))
713 return -ENODEV; 868 return -ENODEV;
714 869
870 BUG_ON(panel->backlight.max == 0);
871
715 memset(&props, 0, sizeof(props)); 872 memset(&props, 0, sizeof(props));
716 props.type = BACKLIGHT_RAW; 873 props.type = BACKLIGHT_RAW;
717 props.brightness = dev_priv->backlight.level; 874 props.brightness = panel->backlight.level;
718 875 props.max_brightness = panel->backlight.max;
719 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
720 props.max_brightness = intel_panel_get_max_backlight(dev);
721 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
722 876
723 if (props.max_brightness == 0) { 877 /*
724 DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n"); 878 * Note: using the same name independent of the connector prevents
725 return -ENODEV; 879 * registration of multiple backlight devices in the driver.
726 } 880 */
727 dev_priv->backlight.device = 881 panel->backlight.device =
728 backlight_device_register("intel_backlight", 882 backlight_device_register("intel_backlight",
729 &connector->kdev, dev, 883 connector->base.kdev,
730 &intel_panel_bl_ops, &props); 884 connector,
885 &intel_backlight_device_ops, &props);
731 886
732 if (IS_ERR(dev_priv->backlight.device)) { 887 if (IS_ERR(panel->backlight.device)) {
733 DRM_ERROR("Failed to register backlight: %ld\n", 888 DRM_ERROR("Failed to register backlight: %ld\n",
734 PTR_ERR(dev_priv->backlight.device)); 889 PTR_ERR(panel->backlight.device));
735 dev_priv->backlight.device = NULL; 890 panel->backlight.device = NULL;
736 return -ENODEV; 891 return -ENODEV;
737 } 892 }
738 return 0; 893 return 0;
739} 894}
740 895
741void intel_panel_destroy_backlight(struct drm_device *dev) 896static void intel_backlight_device_unregister(struct intel_connector *connector)
742{ 897{
898 struct intel_panel *panel = &connector->panel;
899
900 if (panel->backlight.device) {
901 backlight_device_unregister(panel->backlight.device);
902 panel->backlight.device = NULL;
903 }
904}
905#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
906static int intel_backlight_device_register(struct intel_connector *connector)
907{
908 return 0;
909}
910static void intel_backlight_device_unregister(struct intel_connector *connector)
911{
912}
913#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
914
915/*
916 * Note: The setup hooks can't assume pipe is set!
917 *
918 * XXX: Query mode clock or hardware clock and program PWM modulation frequency
919 * appropriately when it's 0. Use VBT and/or sane defaults.
920 */
921static int bdw_setup_backlight(struct intel_connector *connector)
922{
923 struct drm_device *dev = connector->base.dev;
924 struct drm_i915_private *dev_priv = dev->dev_private;
925 struct intel_panel *panel = &connector->panel;
926 u32 pch_ctl1, pch_ctl2, val;
927
928 pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
929 panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
930
931 pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
932 panel->backlight.max = pch_ctl2 >> 16;
933 if (!panel->backlight.max)
934 return -ENODEV;
935
936 val = bdw_get_backlight(connector);
937 panel->backlight.level = intel_panel_compute_brightness(connector, val);
938
939 panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
940 panel->backlight.level != 0;
941
942 return 0;
943}
944
945static int pch_setup_backlight(struct intel_connector *connector)
946{
947 struct drm_device *dev = connector->base.dev;
743 struct drm_i915_private *dev_priv = dev->dev_private; 948 struct drm_i915_private *dev_priv = dev->dev_private;
744 if (dev_priv->backlight.device) { 949 struct intel_panel *panel = &connector->panel;
745 backlight_device_unregister(dev_priv->backlight.device); 950 u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
746 dev_priv->backlight.device = NULL; 951
952 pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
953 panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
954
955 pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
956 panel->backlight.max = pch_ctl2 >> 16;
957 if (!panel->backlight.max)
958 return -ENODEV;
959
960 val = pch_get_backlight(connector);
961 panel->backlight.level = intel_panel_compute_brightness(connector, val);
962
963 cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
964 panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
965 (pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0;
966
967 return 0;
968}
969
970static int i9xx_setup_backlight(struct intel_connector *connector)
971{
972 struct drm_device *dev = connector->base.dev;
973 struct drm_i915_private *dev_priv = dev->dev_private;
974 struct intel_panel *panel = &connector->panel;
975 u32 ctl, val;
976
977 ctl = I915_READ(BLC_PWM_CTL);
978
979 if (IS_GEN2(dev))
980 panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
981
982 if (IS_PINEVIEW(dev))
983 panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
984
985 panel->backlight.max = ctl >> 17;
986 if (panel->backlight.combination_mode)
987 panel->backlight.max *= 0xff;
988
989 if (!panel->backlight.max)
990 return -ENODEV;
991
992 val = i9xx_get_backlight(connector);
993 panel->backlight.level = intel_panel_compute_brightness(connector, val);
994
995 panel->backlight.enabled = panel->backlight.level != 0;
996
997 return 0;
998}
999
1000static int i965_setup_backlight(struct intel_connector *connector)
1001{
1002 struct drm_device *dev = connector->base.dev;
1003 struct drm_i915_private *dev_priv = dev->dev_private;
1004 struct intel_panel *panel = &connector->panel;
1005 u32 ctl, ctl2, val;
1006
1007 ctl2 = I915_READ(BLC_PWM_CTL2);
1008 panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
1009 panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
1010
1011 ctl = I915_READ(BLC_PWM_CTL);
1012 panel->backlight.max = ctl >> 16;
1013 if (panel->backlight.combination_mode)
1014 panel->backlight.max *= 0xff;
1015
1016 if (!panel->backlight.max)
1017 return -ENODEV;
1018
1019 val = i9xx_get_backlight(connector);
1020 panel->backlight.level = intel_panel_compute_brightness(connector, val);
1021
1022 panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
1023 panel->backlight.level != 0;
1024
1025 return 0;
1026}
1027
1028static int vlv_setup_backlight(struct intel_connector *connector)
1029{
1030 struct drm_device *dev = connector->base.dev;
1031 struct drm_i915_private *dev_priv = dev->dev_private;
1032 struct intel_panel *panel = &connector->panel;
1033 enum pipe pipe;
1034 u32 ctl, ctl2, val;
1035
1036 for_each_pipe(pipe) {
1037 u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
1038
1039 /* Skip if the modulation freq is already set */
1040 if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
1041 continue;
1042
1043 cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
1044 I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
1045 cur_val);
747 } 1046 }
1047
1048 ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
1049 panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
1050
1051 ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
1052 panel->backlight.max = ctl >> 16;
1053 if (!panel->backlight.max)
1054 return -ENODEV;
1055
1056 val = _vlv_get_backlight(dev, PIPE_A);
1057 panel->backlight.level = intel_panel_compute_brightness(connector, val);
1058
1059 panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
1060 panel->backlight.level != 0;
1061
1062 return 0;
748} 1063}
749#else 1064
750int intel_panel_setup_backlight(struct drm_connector *connector) 1065int intel_panel_setup_backlight(struct drm_connector *connector)
751{ 1066{
752 intel_panel_init_backlight(connector->dev); 1067 struct drm_device *dev = connector->dev;
1068 struct drm_i915_private *dev_priv = dev->dev_private;
1069 struct intel_connector *intel_connector = to_intel_connector(connector);
1070 struct intel_panel *panel = &intel_connector->panel;
1071 unsigned long flags;
1072 int ret;
1073
1074 /* set level and max in panel struct */
1075 spin_lock_irqsave(&dev_priv->backlight_lock, flags);
1076 ret = dev_priv->display.setup_backlight(intel_connector);
1077 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
1078
1079 if (ret) {
1080 DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
1081 drm_get_connector_name(connector));
1082 return ret;
1083 }
1084
1085 intel_backlight_device_register(intel_connector);
1086
1087 panel->backlight.present = true;
1088
1089 DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
1090 "sysfs interface %sregistered\n",
1091 panel->backlight.enabled ? "enabled" : "disabled",
1092 panel->backlight.level, panel->backlight.max,
1093 panel->backlight.device ? "" : "not ");
1094
753 return 0; 1095 return 0;
754} 1096}
755 1097
756void intel_panel_destroy_backlight(struct drm_device *dev) 1098void intel_panel_destroy_backlight(struct drm_connector *connector)
757{ 1099{
758 return; 1100 struct intel_connector *intel_connector = to_intel_connector(connector);
1101 struct intel_panel *panel = &intel_connector->panel;
1102
1103 panel->backlight.present = false;
1104 intel_backlight_device_unregister(intel_connector);
1105}
1106
1107/* Set up chip specific backlight functions */
1108void intel_panel_init_backlight_funcs(struct drm_device *dev)
1109{
1110 struct drm_i915_private *dev_priv = dev->dev_private;
1111
1112 if (IS_BROADWELL(dev)) {
1113 dev_priv->display.setup_backlight = bdw_setup_backlight;
1114 dev_priv->display.enable_backlight = bdw_enable_backlight;
1115 dev_priv->display.disable_backlight = pch_disable_backlight;
1116 dev_priv->display.set_backlight = bdw_set_backlight;
1117 dev_priv->display.get_backlight = bdw_get_backlight;
1118 } else if (HAS_PCH_SPLIT(dev)) {
1119 dev_priv->display.setup_backlight = pch_setup_backlight;
1120 dev_priv->display.enable_backlight = pch_enable_backlight;
1121 dev_priv->display.disable_backlight = pch_disable_backlight;
1122 dev_priv->display.set_backlight = pch_set_backlight;
1123 dev_priv->display.get_backlight = pch_get_backlight;
1124 } else if (IS_VALLEYVIEW(dev)) {
1125 dev_priv->display.setup_backlight = vlv_setup_backlight;
1126 dev_priv->display.enable_backlight = vlv_enable_backlight;
1127 dev_priv->display.disable_backlight = vlv_disable_backlight;
1128 dev_priv->display.set_backlight = vlv_set_backlight;
1129 dev_priv->display.get_backlight = vlv_get_backlight;
1130 } else if (IS_GEN4(dev)) {
1131 dev_priv->display.setup_backlight = i965_setup_backlight;
1132 dev_priv->display.enable_backlight = i965_enable_backlight;
1133 dev_priv->display.disable_backlight = i965_disable_backlight;
1134 dev_priv->display.set_backlight = i9xx_set_backlight;
1135 dev_priv->display.get_backlight = i9xx_get_backlight;
1136 } else {
1137 dev_priv->display.setup_backlight = i9xx_setup_backlight;
1138 dev_priv->display.enable_backlight = i9xx_enable_backlight;
1139 dev_priv->display.disable_backlight = i9xx_disable_backlight;
1140 dev_priv->display.set_backlight = i9xx_set_backlight;
1141 dev_priv->display.get_backlight = i9xx_get_backlight;
1142 }
759} 1143}
760#endif
761 1144
762int intel_panel_init(struct intel_panel *panel, 1145int intel_panel_init(struct intel_panel *panel,
763 struct drm_display_mode *fixed_mode) 1146 struct drm_display_mode *fixed_mode)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 542b4448ccb7..172efa0bfb86 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2291,7 +2291,9 @@ static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
2291 2291
2292static unsigned int ilk_display_fifo_size(const struct drm_device *dev) 2292static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
2293{ 2293{
2294 if (INTEL_INFO(dev)->gen >= 7) 2294 if (INTEL_INFO(dev)->gen >= 8)
2295 return 3072;
2296 else if (INTEL_INFO(dev)->gen >= 7)
2295 return 768; 2297 return 768;
2296 else 2298 else
2297 return 512; 2299 return 512;
@@ -2336,7 +2338,9 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
2336 } 2338 }
2337 2339
2338 /* clamp to max that the registers can hold */ 2340 /* clamp to max that the registers can hold */
2339 if (INTEL_INFO(dev)->gen >= 7) 2341 if (INTEL_INFO(dev)->gen >= 8)
2342 max = level == 0 ? 255 : 2047;
2343 else if (INTEL_INFO(dev)->gen >= 7)
2340 /* IVB/HSW primary/sprite plane watermarks */ 2344 /* IVB/HSW primary/sprite plane watermarks */
2341 max = level == 0 ? 127 : 1023; 2345 max = level == 0 ? 127 : 1023;
2342 else if (!is_sprite) 2346 else if (!is_sprite)
@@ -2366,10 +2370,13 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
2366} 2370}
2367 2371
2368/* Calculate the maximum FBC watermark */ 2372/* Calculate the maximum FBC watermark */
2369static unsigned int ilk_fbc_wm_max(void) 2373static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
2370{ 2374{
2371 /* max that registers can hold */ 2375 /* max that registers can hold */
2372 return 15; 2376 if (INTEL_INFO(dev)->gen >= 8)
2377 return 31;
2378 else
2379 return 15;
2373} 2380}
2374 2381
2375static void ilk_compute_wm_maximums(struct drm_device *dev, 2382static void ilk_compute_wm_maximums(struct drm_device *dev,
@@ -2381,7 +2388,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev,
2381 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); 2388 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2382 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); 2389 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2383 max->cur = ilk_cursor_wm_max(dev, level, config); 2390 max->cur = ilk_cursor_wm_max(dev, level, config);
2384 max->fbc = ilk_fbc_wm_max(); 2391 max->fbc = ilk_fbc_wm_max(dev);
2385} 2392}
2386 2393
2387static bool ilk_validate_wm_level(int level, 2394static bool ilk_validate_wm_level(int level,
@@ -2722,10 +2729,18 @@ static void hsw_compute_wm_results(struct drm_device *dev,
2722 if (!r->enable) 2729 if (!r->enable)
2723 break; 2730 break;
2724 2731
2725 results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2, 2732 results->wm_lp[wm_lp - 1] = WM3_LP_EN |
2726 r->fbc_val, 2733 ((level * 2) << WM1_LP_LATENCY_SHIFT) |
2727 r->pri_val, 2734 (r->pri_val << WM1_LP_SR_SHIFT) |
2728 r->cur_val); 2735 r->cur_val;
2736
2737 if (INTEL_INFO(dev)->gen >= 8)
2738 results->wm_lp[wm_lp - 1] |=
2739 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2740 else
2741 results->wm_lp[wm_lp - 1] |=
2742 r->fbc_val << WM1_LP_FBC_SHIFT;
2743
2729 results->wm_lp_spr[wm_lp - 1] = r->spr_val; 2744 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2730 } 2745 }
2731 2746
@@ -3710,6 +3725,78 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
3710 I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs); 3725 I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
3711} 3726}
3712 3727
3728static void gen8_enable_rps(struct drm_device *dev)
3729{
3730 struct drm_i915_private *dev_priv = dev->dev_private;
3731 struct intel_ring_buffer *ring;
3732 uint32_t rc6_mask = 0, rp_state_cap;
3733 int unused;
3734
3735 /* 1a: Software RC state - RC0 */
3736 I915_WRITE(GEN6_RC_STATE, 0);
3737
3738 /* 1c & 1d: Get forcewake during program sequence. Although the driver
3739 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
3740 gen6_gt_force_wake_get(dev_priv);
3741
3742 /* 2a: Disable RC states. */
3743 I915_WRITE(GEN6_RC_CONTROL, 0);
3744
3745 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3746
3747 /* 2b: Program RC6 thresholds.*/
3748 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3749 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
3750 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
3751 for_each_ring(ring, dev_priv, unused)
3752 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3753 I915_WRITE(GEN6_RC_SLEEP, 0);
3754 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3755
3756 /* 3: Enable RC6 */
3757 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3758 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3759 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
3760 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3761 GEN6_RC_CTL_EI_MODE(1) |
3762 rc6_mask);
3763
3764 /* 4 Program defaults and thresholds for RPS*/
3765 I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
3766 I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
3767 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
3768 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
3769
3770 /* Docs recommend 900MHz, and 300 MHz respectively */
3771 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3772 dev_priv->rps.max_delay << 24 |
3773 dev_priv->rps.min_delay << 16);
3774
3775 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
3776 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
3777 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
3778 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
3779
3780 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3781
3782 /* 5: Enable RPS */
3783 I915_WRITE(GEN6_RP_CONTROL,
3784 GEN6_RP_MEDIA_TURBO |
3785 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3786 GEN6_RP_MEDIA_IS_GFX |
3787 GEN6_RP_ENABLE |
3788 GEN6_RP_UP_BUSY_AVG |
3789 GEN6_RP_DOWN_IDLE_AVG);
3790
3791 /* 6: Ring frequency + overclocking (our driver does this later */
3792
3793 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3794
3795 gen6_enable_rps_interrupts(dev);
3796
3797 gen6_gt_force_wake_put(dev_priv);
3798}
3799
3713static void gen6_enable_rps(struct drm_device *dev) 3800static void gen6_enable_rps(struct drm_device *dev)
3714{ 3801{
3715 struct drm_i915_private *dev_priv = dev->dev_private; 3802 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3872,7 +3959,10 @@ void gen6_update_ring_freq(struct drm_device *dev)
3872 int diff = dev_priv->rps.max_delay - gpu_freq; 3959 int diff = dev_priv->rps.max_delay - gpu_freq;
3873 unsigned int ia_freq = 0, ring_freq = 0; 3960 unsigned int ia_freq = 0, ring_freq = 0;
3874 3961
3875 if (IS_HASWELL(dev)) { 3962 if (INTEL_INFO(dev)->gen >= 8) {
3963 /* max(2 * GT, DDR). NB: GT is 50MHz units */
3964 ring_freq = max(min_ring_freq, gpu_freq);
3965 } else if (IS_HASWELL(dev)) {
3876 ring_freq = mult_frac(gpu_freq, 5, 4); 3966 ring_freq = mult_frac(gpu_freq, 5, 4);
3877 ring_freq = max(min_ring_freq, ring_freq); 3967 ring_freq = max(min_ring_freq, ring_freq);
3878 /* leave ia_freq as the default, chosen by cpufreq */ 3968 /* leave ia_freq as the default, chosen by cpufreq */
@@ -4818,6 +4908,9 @@ static void intel_gen6_powersave_work(struct work_struct *work)
4818 4908
4819 if (IS_VALLEYVIEW(dev)) { 4909 if (IS_VALLEYVIEW(dev)) {
4820 valleyview_enable_rps(dev); 4910 valleyview_enable_rps(dev);
4911 } else if (IS_BROADWELL(dev)) {
4912 gen8_enable_rps(dev);
4913 gen6_update_ring_freq(dev);
4821 } else { 4914 } else {
4822 gen6_enable_rps(dev); 4915 gen6_enable_rps(dev);
4823 gen6_update_ring_freq(dev); 4916 gen6_update_ring_freq(dev);
@@ -5126,6 +5219,50 @@ static void lpt_suspend_hw(struct drm_device *dev)
5126 } 5219 }
5127} 5220}
5128 5221
5222static void gen8_init_clock_gating(struct drm_device *dev)
5223{
5224 struct drm_i915_private *dev_priv = dev->dev_private;
5225 enum pipe i;
5226
5227 I915_WRITE(WM3_LP_ILK, 0);
5228 I915_WRITE(WM2_LP_ILK, 0);
5229 I915_WRITE(WM1_LP_ILK, 0);
5230
5231 /* FIXME(BDW): Check all the w/a, some might only apply to
5232 * pre-production hw. */
5233
5234 WARN(!i915_preliminary_hw_support,
5235 "GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n");
5236 I915_WRITE(HALF_SLICE_CHICKEN3,
5237 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
5238 I915_WRITE(HALF_SLICE_CHICKEN3,
5239 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
5240 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
5241
5242 I915_WRITE(_3D_CHICKEN3,
5243 _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
5244
5245 I915_WRITE(COMMON_SLICE_CHICKEN2,
5246 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
5247
5248 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5249 _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
5250
5251 /* WaSwitchSolVfFArbitrationPriority */
5252 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5253
5254 /* WaPsrDPAMaskVBlankInSRD */
5255 I915_WRITE(CHICKEN_PAR1_1,
5256 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
5257
5258 /* WaPsrDPRSUnmaskVBlankInSRD */
5259 for_each_pipe(i) {
5260 I915_WRITE(CHICKEN_PIPESL_1(i),
5261 I915_READ(CHICKEN_PIPESL_1(i) |
5262 DPRS_MASK_VBLANK_SRD));
5263 }
5264}
5265
5129static void haswell_init_clock_gating(struct drm_device *dev) 5266static void haswell_init_clock_gating(struct drm_device *dev)
5130{ 5267{
5131 struct drm_i915_private *dev_priv = dev->dev_private; 5268 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5476,7 +5613,9 @@ static bool is_always_on_power_domain(struct drm_device *dev,
5476 5613
5477 BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK); 5614 BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
5478 5615
5479 if (IS_HASWELL(dev)) { 5616 if (IS_BROADWELL(dev)) {
5617 always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS;
5618 } else if (IS_HASWELL(dev)) {
5480 always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS; 5619 always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
5481 } else { 5620 } else {
5482 WARN_ON(1); 5621 WARN_ON(1);
@@ -5510,6 +5649,7 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5510{ 5649{
5511 struct drm_i915_private *dev_priv = dev->dev_private; 5650 struct drm_i915_private *dev_priv = dev->dev_private;
5512 bool is_enabled, enable_requested; 5651 bool is_enabled, enable_requested;
5652 unsigned long irqflags;
5513 uint32_t tmp; 5653 uint32_t tmp;
5514 5654
5515 tmp = I915_READ(HSW_PWR_WELL_DRIVER); 5655 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
@@ -5527,9 +5667,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5527 HSW_PWR_WELL_STATE_ENABLED), 20)) 5667 HSW_PWR_WELL_STATE_ENABLED), 20))
5528 DRM_ERROR("Timeout enabling power well\n"); 5668 DRM_ERROR("Timeout enabling power well\n");
5529 } 5669 }
5670
5671 if (IS_BROADWELL(dev)) {
5672 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
5673 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
5674 dev_priv->de_irq_mask[PIPE_B]);
5675 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
5676 ~dev_priv->de_irq_mask[PIPE_B] |
5677 GEN8_PIPE_VBLANK);
5678 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
5679 dev_priv->de_irq_mask[PIPE_C]);
5680 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
5681 ~dev_priv->de_irq_mask[PIPE_C] |
5682 GEN8_PIPE_VBLANK);
5683 POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
5684 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
5685 }
5530 } else { 5686 } else {
5531 if (enable_requested) { 5687 if (enable_requested) {
5532 unsigned long irqflags;
5533 enum pipe p; 5688 enum pipe p;
5534 5689
5535 I915_WRITE(HSW_PWR_WELL_DRIVER, 0); 5690 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
@@ -5798,6 +5953,8 @@ void intel_init_pm(struct drm_device *dev)
5798 dev_priv->display.update_wm = NULL; 5953 dev_priv->display.update_wm = NULL;
5799 } 5954 }
5800 dev_priv->display.init_clock_gating = haswell_init_clock_gating; 5955 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
5956 } else if (INTEL_INFO(dev)->gen == 8) {
5957 dev_priv->display.init_clock_gating = gen8_init_clock_gating;
5801 } else 5958 } else
5802 dev_priv->display.update_wm = NULL; 5959 dev_priv->display.update_wm = NULL;
5803 } else if (IS_VALLEYVIEW(dev)) { 5960 } else if (IS_VALLEYVIEW(dev)) {
@@ -5949,4 +6106,3 @@ void intel_pm_init(struct drm_device *dev)
5949 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 6106 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5950 intel_gen6_powersave_work); 6107 intel_gen6_powersave_work);
5951} 6108}
5952
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2dec134f75eb..c2f09d456300 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -360,6 +360,47 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
360 return 0; 360 return 0;
361} 361}
362 362
363static int
364gen8_render_ring_flush(struct intel_ring_buffer *ring,
365 u32 invalidate_domains, u32 flush_domains)
366{
367 u32 flags = 0;
368 u32 scratch_addr = ring->scratch.gtt_offset + 128;
369 int ret;
370
371 flags |= PIPE_CONTROL_CS_STALL;
372
373 if (flush_domains) {
374 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
375 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
376 }
377 if (invalidate_domains) {
378 flags |= PIPE_CONTROL_TLB_INVALIDATE;
379 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
380 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
381 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
382 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
383 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
384 flags |= PIPE_CONTROL_QW_WRITE;
385 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
386 }
387
388 ret = intel_ring_begin(ring, 6);
389 if (ret)
390 return ret;
391
392 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
393 intel_ring_emit(ring, flags);
394 intel_ring_emit(ring, scratch_addr);
395 intel_ring_emit(ring, 0);
396 intel_ring_emit(ring, 0);
397 intel_ring_emit(ring, 0);
398 intel_ring_advance(ring);
399
400 return 0;
401
402}
403
363static void ring_write_tail(struct intel_ring_buffer *ring, 404static void ring_write_tail(struct intel_ring_buffer *ring,
364 u32 value) 405 u32 value)
365{ 406{
@@ -924,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
924 } else if (IS_GEN6(ring->dev)) { 965 } else if (IS_GEN6(ring->dev)) {
925 mmio = RING_HWS_PGA_GEN6(ring->mmio_base); 966 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
926 } else { 967 } else {
968 /* XXX: gen8 returns to sanity */
927 mmio = RING_HWS_PGA(ring->mmio_base); 969 mmio = RING_HWS_PGA(ring->mmio_base);
928 } 970 }
929 971
@@ -1066,6 +1108,52 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
1066 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1108 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1067} 1109}
1068 1110
1111static bool
1112gen8_ring_get_irq(struct intel_ring_buffer *ring)
1113{
1114 struct drm_device *dev = ring->dev;
1115 struct drm_i915_private *dev_priv = dev->dev_private;
1116 unsigned long flags;
1117
1118 if (!dev->irq_enabled)
1119 return false;
1120
1121 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1122 if (ring->irq_refcount++ == 0) {
1123 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1124 I915_WRITE_IMR(ring,
1125 ~(ring->irq_enable_mask |
1126 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1127 } else {
1128 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1129 }
1130 POSTING_READ(RING_IMR(ring->mmio_base));
1131 }
1132 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1133
1134 return true;
1135}
1136
1137static void
1138gen8_ring_put_irq(struct intel_ring_buffer *ring)
1139{
1140 struct drm_device *dev = ring->dev;
1141 struct drm_i915_private *dev_priv = dev->dev_private;
1142 unsigned long flags;
1143
1144 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1145 if (--ring->irq_refcount == 0) {
1146 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1147 I915_WRITE_IMR(ring,
1148 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1149 } else {
1150 I915_WRITE_IMR(ring, ~0);
1151 }
1152 POSTING_READ(RING_IMR(ring->mmio_base));
1153 }
1154 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1155}
1156
1069static int 1157static int
1070i965_dispatch_execbuffer(struct intel_ring_buffer *ring, 1158i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
1071 u32 offset, u32 length, 1159 u32 offset, u32 length,
@@ -1624,6 +1712,8 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
1624 return ret; 1712 return ret;
1625 1713
1626 cmd = MI_FLUSH_DW; 1714 cmd = MI_FLUSH_DW;
1715 if (INTEL_INFO(ring->dev)->gen >= 8)
1716 cmd += 1;
1627 /* 1717 /*
1628 * Bspec vol 1c.5 - video engine command streamer: 1718 * Bspec vol 1c.5 - video engine command streamer:
1629 * "If ENABLED, all TLBs will be invalidated once the flush 1719 * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1635,9 +1725,38 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
1635 MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 1725 MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1636 intel_ring_emit(ring, cmd); 1726 intel_ring_emit(ring, cmd);
1637 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 1727 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1728 if (INTEL_INFO(ring->dev)->gen >= 8) {
1729 intel_ring_emit(ring, 0); /* upper addr */
1730 intel_ring_emit(ring, 0); /* value */
1731 } else {
1732 intel_ring_emit(ring, 0);
1733 intel_ring_emit(ring, MI_NOOP);
1734 }
1735 intel_ring_advance(ring);
1736 return 0;
1737}
1738
1739static int
1740gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1741 u32 offset, u32 len,
1742 unsigned flags)
1743{
1744 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1745 bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
1746 !(flags & I915_DISPATCH_SECURE);
1747 int ret;
1748
1749 ret = intel_ring_begin(ring, 4);
1750 if (ret)
1751 return ret;
1752
1753 /* FIXME(BDW): Address space and security selectors. */
1754 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
1755 intel_ring_emit(ring, offset);
1638 intel_ring_emit(ring, 0); 1756 intel_ring_emit(ring, 0);
1639 intel_ring_emit(ring, MI_NOOP); 1757 intel_ring_emit(ring, MI_NOOP);
1640 intel_ring_advance(ring); 1758 intel_ring_advance(ring);
1759
1641 return 0; 1760 return 0;
1642} 1761}
1643 1762
@@ -1697,6 +1816,8 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
1697 return ret; 1816 return ret;
1698 1817
1699 cmd = MI_FLUSH_DW; 1818 cmd = MI_FLUSH_DW;
1819 if (INTEL_INFO(ring->dev)->gen >= 8)
1820 cmd += 1;
1700 /* 1821 /*
1701 * Bspec vol 1c.3 - blitter engine command streamer: 1822 * Bspec vol 1c.3 - blitter engine command streamer:
1702 * "If ENABLED, all TLBs will be invalidated once the flush 1823 * "If ENABLED, all TLBs will be invalidated once the flush
@@ -1708,8 +1829,13 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
1708 MI_FLUSH_DW_OP_STOREDW; 1829 MI_FLUSH_DW_OP_STOREDW;
1709 intel_ring_emit(ring, cmd); 1830 intel_ring_emit(ring, cmd);
1710 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 1831 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1711 intel_ring_emit(ring, 0); 1832 if (INTEL_INFO(ring->dev)->gen >= 8) {
1712 intel_ring_emit(ring, MI_NOOP); 1833 intel_ring_emit(ring, 0); /* upper addr */
1834 intel_ring_emit(ring, 0); /* value */
1835 } else {
1836 intel_ring_emit(ring, 0);
1837 intel_ring_emit(ring, MI_NOOP);
1838 }
1713 intel_ring_advance(ring); 1839 intel_ring_advance(ring);
1714 1840
1715 if (IS_GEN7(dev) && flush) 1841 if (IS_GEN7(dev) && flush)
@@ -1732,8 +1858,14 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1732 ring->flush = gen7_render_ring_flush; 1858 ring->flush = gen7_render_ring_flush;
1733 if (INTEL_INFO(dev)->gen == 6) 1859 if (INTEL_INFO(dev)->gen == 6)
1734 ring->flush = gen6_render_ring_flush; 1860 ring->flush = gen6_render_ring_flush;
1735 ring->irq_get = gen6_ring_get_irq; 1861 if (INTEL_INFO(dev)->gen >= 8) {
1736 ring->irq_put = gen6_ring_put_irq; 1862 ring->flush = gen8_render_ring_flush;
1863 ring->irq_get = gen8_ring_get_irq;
1864 ring->irq_put = gen8_ring_put_irq;
1865 } else {
1866 ring->irq_get = gen6_ring_get_irq;
1867 ring->irq_put = gen6_ring_put_irq;
1868 }
1737 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 1869 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1738 ring->get_seqno = gen6_ring_get_seqno; 1870 ring->get_seqno = gen6_ring_get_seqno;
1739 ring->set_seqno = ring_set_seqno; 1871 ring->set_seqno = ring_set_seqno;
@@ -1775,6 +1907,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1775 ring->write_tail = ring_write_tail; 1907 ring->write_tail = ring_write_tail;
1776 if (IS_HASWELL(dev)) 1908 if (IS_HASWELL(dev))
1777 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; 1909 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
1910 else if (IS_GEN8(dev))
1911 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
1778 else if (INTEL_INFO(dev)->gen >= 6) 1912 else if (INTEL_INFO(dev)->gen >= 6)
1779 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 1913 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1780 else if (INTEL_INFO(dev)->gen >= 4) 1914 else if (INTEL_INFO(dev)->gen >= 4)
@@ -1888,7 +2022,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1888 ring->id = VCS; 2022 ring->id = VCS;
1889 2023
1890 ring->write_tail = ring_write_tail; 2024 ring->write_tail = ring_write_tail;
1891 if (IS_GEN6(dev) || IS_GEN7(dev)) { 2025 if (INTEL_INFO(dev)->gen >= 6) {
1892 ring->mmio_base = GEN6_BSD_RING_BASE; 2026 ring->mmio_base = GEN6_BSD_RING_BASE;
1893 /* gen6 bsd needs a special wa for tail updates */ 2027 /* gen6 bsd needs a special wa for tail updates */
1894 if (IS_GEN6(dev)) 2028 if (IS_GEN6(dev))
@@ -1897,10 +2031,20 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1897 ring->add_request = gen6_add_request; 2031 ring->add_request = gen6_add_request;
1898 ring->get_seqno = gen6_ring_get_seqno; 2032 ring->get_seqno = gen6_ring_get_seqno;
1899 ring->set_seqno = ring_set_seqno; 2033 ring->set_seqno = ring_set_seqno;
1900 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; 2034 if (INTEL_INFO(dev)->gen >= 8) {
1901 ring->irq_get = gen6_ring_get_irq; 2035 ring->irq_enable_mask =
1902 ring->irq_put = gen6_ring_put_irq; 2036 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
1903 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2037 ring->irq_get = gen8_ring_get_irq;
2038 ring->irq_put = gen8_ring_put_irq;
2039 ring->dispatch_execbuffer =
2040 gen8_ring_dispatch_execbuffer;
2041 } else {
2042 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2043 ring->irq_get = gen6_ring_get_irq;
2044 ring->irq_put = gen6_ring_put_irq;
2045 ring->dispatch_execbuffer =
2046 gen6_ring_dispatch_execbuffer;
2047 }
1904 ring->sync_to = gen6_ring_sync; 2048 ring->sync_to = gen6_ring_sync;
1905 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR; 2049 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
1906 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID; 2050 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
@@ -1946,10 +2090,18 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
1946 ring->add_request = gen6_add_request; 2090 ring->add_request = gen6_add_request;
1947 ring->get_seqno = gen6_ring_get_seqno; 2091 ring->get_seqno = gen6_ring_get_seqno;
1948 ring->set_seqno = ring_set_seqno; 2092 ring->set_seqno = ring_set_seqno;
1949 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; 2093 if (INTEL_INFO(dev)->gen >= 8) {
1950 ring->irq_get = gen6_ring_get_irq; 2094 ring->irq_enable_mask =
1951 ring->irq_put = gen6_ring_put_irq; 2095 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
1952 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2096 ring->irq_get = gen8_ring_get_irq;
2097 ring->irq_put = gen8_ring_put_irq;
2098 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2099 } else {
2100 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2101 ring->irq_get = gen6_ring_get_irq;
2102 ring->irq_put = gen6_ring_put_irq;
2103 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2104 }
1953 ring->sync_to = gen6_ring_sync; 2105 ring->sync_to = gen6_ring_sync;
1954 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR; 2106 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
1955 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV; 2107 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
@@ -1978,10 +2130,19 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
1978 ring->add_request = gen6_add_request; 2130 ring->add_request = gen6_add_request;
1979 ring->get_seqno = gen6_ring_get_seqno; 2131 ring->get_seqno = gen6_ring_get_seqno;
1980 ring->set_seqno = ring_set_seqno; 2132 ring->set_seqno = ring_set_seqno;
1981 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 2133
1982 ring->irq_get = hsw_vebox_get_irq; 2134 if (INTEL_INFO(dev)->gen >= 8) {
1983 ring->irq_put = hsw_vebox_put_irq; 2135 ring->irq_enable_mask =
1984 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2136 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
2137 ring->irq_get = gen8_ring_get_irq;
2138 ring->irq_put = gen8_ring_put_irq;
2139 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2140 } else {
2141 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2142 ring->irq_get = hsw_vebox_get_irq;
2143 ring->irq_put = hsw_vebox_put_irq;
2144 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2145 }
1985 ring->sync_to = gen6_ring_sync; 2146 ring->sync_to = gen6_ring_sync;
1986 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER; 2147 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
1987 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV; 2148 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8afaad6bcc48..b9fabf826f7d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -260,14 +260,14 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
260 if (obj->tiling_mode != I915_TILING_NONE) 260 if (obj->tiling_mode != I915_TILING_NONE)
261 sprctl |= SPRITE_TILED; 261 sprctl |= SPRITE_TILED;
262 262
263 if (IS_HASWELL(dev)) 263 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
264 sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE; 264 sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
265 else 265 else
266 sprctl |= SPRITE_TRICKLE_FEED_DISABLE; 266 sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
267 267
268 sprctl |= SPRITE_ENABLE; 268 sprctl |= SPRITE_ENABLE;
269 269
270 if (IS_HASWELL(dev)) 270 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
271 sprctl |= SPRITE_PIPE_CSC_ENABLE; 271 sprctl |= SPRITE_PIPE_CSC_ENABLE;
272 272
273 intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true, 273 intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
@@ -306,7 +306,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
306 306
307 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET 307 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
308 * register */ 308 * register */
309 if (IS_HASWELL(dev)) 309 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
310 I915_WRITE(SPROFFSET(pipe), (y << 16) | x); 310 I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
311 else if (obj->tiling_mode != I915_TILING_NONE) 311 else if (obj->tiling_mode != I915_TILING_NONE)
312 I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); 312 I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
@@ -955,7 +955,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
955 955
956 obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE); 956 obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
957 if (!obj) { 957 if (!obj) {
958 ret = -EINVAL; 958 ret = -ENOENT;
959 goto out_unlock; 959 goto out_unlock;
960 } 960 }
961 961
@@ -984,7 +984,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
984 984
985 obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE); 985 obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
986 if (!obj) { 986 if (!obj) {
987 ret = -EINVAL; 987 ret = -ENOENT;
988 goto out_unlock; 988 goto out_unlock;
989 } 989 }
990 990
@@ -1092,6 +1092,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1092 break; 1092 break;
1093 1093
1094 case 7: 1094 case 7:
1095 case 8:
1095 if (IS_IVYBRIDGE(dev)) { 1096 if (IS_IVYBRIDGE(dev)) {
1096 intel_plane->can_scale = true; 1097 intel_plane->can_scale = true;
1097 intel_plane->max_downscale = 2; 1098 intel_plane->max_downscale = 2;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a881906969eb..5103d80fc3ae 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -93,7 +93,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
93{ 93{
94 u32 forcewake_ack; 94 u32 forcewake_ack;
95 95
96 if (IS_HASWELL(dev_priv->dev)) 96 if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
97 forcewake_ack = FORCEWAKE_ACK_HSW; 97 forcewake_ack = FORCEWAKE_ACK_HSW;
98 else 98 else
99 forcewake_ack = FORCEWAKE_MT_ACK; 99 forcewake_ack = FORCEWAKE_MT_ACK;
@@ -112,7 +112,8 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
112 DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); 112 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
113 113
114 /* WaRsForcewakeWaitTC0:ivb,hsw */ 114 /* WaRsForcewakeWaitTC0:ivb,hsw */
115 __gen6_gt_wait_for_thread_c0(dev_priv); 115 if (INTEL_INFO(dev_priv->dev)->gen < 8)
116 __gen6_gt_wait_for_thread_c0(dev_priv);
116} 117}
117 118
118static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) 119static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
@@ -459,6 +460,46 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
459 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ 460 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
460} 461}
461 462
463static const u32 gen8_shadowed_regs[] = {
464 FORCEWAKE_MT,
465 GEN6_RPNSWREQ,
466 GEN6_RC_VIDEO_FREQ,
467 RING_TAIL(RENDER_RING_BASE),
468 RING_TAIL(GEN6_BSD_RING_BASE),
469 RING_TAIL(VEBOX_RING_BASE),
470 RING_TAIL(BLT_RING_BASE),
471 /* TODO: Other registers are not yet used */
472};
473
474static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
475{
476 int i;
477 for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
478 if (reg == gen8_shadowed_regs[i])
479 return true;
480
481 return false;
482}
483
484#define __gen8_write(x) \
485static void \
486gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
487 bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
488 REG_WRITE_HEADER; \
489 if (__needs_put) { \
490 dev_priv->uncore.funcs.force_wake_get(dev_priv); \
491 } \
492 __raw_i915_write##x(dev_priv, reg, val); \
493 if (__needs_put) { \
494 dev_priv->uncore.funcs.force_wake_put(dev_priv); \
495 } \
496 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
497}
498
499__gen8_write(8)
500__gen8_write(16)
501__gen8_write(32)
502__gen8_write(64)
462__hsw_write(8) 503__hsw_write(8)
463__hsw_write(16) 504__hsw_write(16)
464__hsw_write(32) 505__hsw_write(32)
@@ -476,6 +517,7 @@ __gen4_write(16)
476__gen4_write(32) 517__gen4_write(32)
477__gen4_write(64) 518__gen4_write(64)
478 519
520#undef __gen8_write
479#undef __hsw_write 521#undef __hsw_write
480#undef __gen6_write 522#undef __gen6_write
481#undef __gen5_write 523#undef __gen5_write
@@ -492,7 +534,7 @@ void intel_uncore_init(struct drm_device *dev)
492 if (IS_VALLEYVIEW(dev)) { 534 if (IS_VALLEYVIEW(dev)) {
493 dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get; 535 dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
494 dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put; 536 dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
495 } else if (IS_HASWELL(dev)) { 537 } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
496 dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get; 538 dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
497 dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put; 539 dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
498 } else if (IS_IVYBRIDGE(dev)) { 540 } else if (IS_IVYBRIDGE(dev)) {
@@ -534,6 +576,16 @@ void intel_uncore_init(struct drm_device *dev)
534 } 576 }
535 577
536 switch (INTEL_INFO(dev)->gen) { 578 switch (INTEL_INFO(dev)->gen) {
579 default:
580 dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
581 dev_priv->uncore.funcs.mmio_writew = gen8_write16;
582 dev_priv->uncore.funcs.mmio_writel = gen8_write32;
583 dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
584 dev_priv->uncore.funcs.mmio_readb = gen6_read8;
585 dev_priv->uncore.funcs.mmio_readw = gen6_read16;
586 dev_priv->uncore.funcs.mmio_readl = gen6_read32;
587 dev_priv->uncore.funcs.mmio_readq = gen6_read64;
588 break;
537 case 7: 589 case 7:
538 case 6: 590 case 6:
539 if (IS_HASWELL(dev)) { 591 if (IS_HASWELL(dev)) {
@@ -767,6 +819,7 @@ static int gen6_do_reset(struct drm_device *dev)
767int intel_gpu_reset(struct drm_device *dev) 819int intel_gpu_reset(struct drm_device *dev)
768{ 820{
769 switch (INTEL_INFO(dev)->gen) { 821 switch (INTEL_INFO(dev)->gen) {
822 case 8:
770 case 7: 823 case 7:
771 case 6: return gen6_do_reset(dev); 824 case 6: return gen6_do_reset(dev);
772 case 5: return ironlake_do_reset(dev); 825 case 5: return ironlake_do_reset(dev);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 503a414cbdad..ee6ed633b7b1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -765,8 +765,6 @@ static int mga_crtc_do_set_base(struct drm_crtc *crtc,
765 } 765 }
766 mgag200_bo_unreserve(bo); 766 mgag200_bo_unreserve(bo);
767 767
768 DRM_INFO("mga base %llx\n", gpu_addr);
769
770 mga_set_start_address(crtc, (u32)gpu_addr); 768 mga_set_start_address(crtc, (u32)gpu_addr);
771 769
772 return 0; 770 return 0;
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index e17914889e54..e5fa12b0d21e 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -21,6 +21,7 @@ msm-y := \
21 msm_drv.o \ 21 msm_drv.o \
22 msm_fb.o \ 22 msm_fb.o \
23 msm_gem.o \ 23 msm_gem.o \
24 msm_gem_prime.o \
24 msm_gem_submit.o \ 25 msm_gem_submit.o \
25 msm_gpu.o \ 26 msm_gpu.o \
26 msm_ringbuffer.o 27 msm_ringbuffer.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 35463864b959..9588098741b5 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -4,16 +4,16 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48) 13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05) 16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
17 17
18Copyright (C) 2013 by the following authors: 18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark) 19- Rob Clark <robdclark@gmail.com> (robclark)
@@ -317,6 +317,38 @@ static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val)
317#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000 317#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000
318#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000 318#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000
319 319
320#define REG_A2XX_MH_ARBITER_CONFIG 0x00000a40
321#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK 0x0000003f
322#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT 0
323static inline uint32_t A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(uint32_t val)
324{
325 return ((val) << A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK;
326}
327#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_GRANULARITY 0x00000040
328#define A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE 0x00000080
329#define A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE 0x00000100
330#define A2XX_MH_ARBITER_CONFIG_L2_ARB_CONTROL 0x00000200
331#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK 0x00001c00
332#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT 10
333static inline uint32_t A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(uint32_t val)
334{
335 return ((val) << A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT) & A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK;
336}
337#define A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE 0x00002000
338#define A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE 0x00004000
339#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE 0x00008000
340#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK 0x003f0000
341#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT 16
342static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
343{
344 return ((val) << A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK;
345}
346#define A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE 0x00400000
347#define A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE 0x00800000
348#define A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE 0x01000000
349#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000
350#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000
351
320#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01 352#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
321#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f 353#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
322#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0 354#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index d183516067b4..d4afdf657559 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -4,16 +4,16 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48) 13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05) 16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
17 17
18Copyright (C) 2013 by the following authors: 18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark) 19- Rob Clark <robdclark@gmail.com> (robclark)
@@ -637,11 +637,12 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
637#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070 637#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
638#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001 638#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
639#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002 639#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
640#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007fc 640#define A3XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
641#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 2 641#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
642static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(uint32_t val) 642#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
643static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
643{ 644{
644 return ((val) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK; 645 return ((((uint32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
645} 646}
646#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800 647#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
647 648
@@ -745,6 +746,7 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
745} 746}
746#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000 747#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
747#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000 748#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
749#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
748#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000 750#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
749#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24 751#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
750static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val) 752static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
@@ -767,7 +769,19 @@ static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val)
767 return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK; 769 return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
768} 770}
769 771
770#define REG_A3XX_UNKNOWN_20C3 0x000020c3 772#define REG_A3XX_RB_ALPHA_REF 0x000020c3
773#define A3XX_RB_ALPHA_REF_UINT__MASK 0x0000ff00
774#define A3XX_RB_ALPHA_REF_UINT__SHIFT 8
775static inline uint32_t A3XX_RB_ALPHA_REF_UINT(uint32_t val)
776{
777 return ((val) << A3XX_RB_ALPHA_REF_UINT__SHIFT) & A3XX_RB_ALPHA_REF_UINT__MASK;
778}
779#define A3XX_RB_ALPHA_REF_FLOAT__MASK 0xffff0000
780#define A3XX_RB_ALPHA_REF_FLOAT__SHIFT 16
781static inline uint32_t A3XX_RB_ALPHA_REF_FLOAT(float val)
782{
783 return ((util_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK;
784}
771 785
772static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; } 786static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
773 787
@@ -1002,7 +1016,7 @@ static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endi
1002#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100 1016#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
1003#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002 1017#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
1004#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004 1018#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
1005#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_ENABLE 0x00000008 1019#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008
1006#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070 1020#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
1007#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4 1021#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
1008static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val) 1022static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
@@ -1038,7 +1052,8 @@ static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
1038 1052
1039#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104 1053#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
1040#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001 1054#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
1041#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000004 1055#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
1056#define A3XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
1042#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700 1057#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
1043#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8 1058#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
1044static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val) 1059static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
@@ -2074,6 +2089,7 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
2074#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09 2089#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
2075 2090
2076#define REG_A3XX_TEX_SAMP_0 0x00000000 2091#define REG_A3XX_TEX_SAMP_0 0x00000000
2092#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002
2077#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c 2093#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
2078#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2 2094#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
2079static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val) 2095static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)
@@ -2134,6 +2150,12 @@ static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val)
2134{ 2150{
2135 return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK; 2151 return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
2136} 2152}
2153#define A3XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
2154#define A3XX_TEX_CONST_0_MIPLVLS__SHIFT 16
2155static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val)
2156{
2157 return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
2158}
2137#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000 2159#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
2138#define A3XX_TEX_CONST_0_FMT__SHIFT 22 2160#define A3XX_TEX_CONST_0_FMT__SHIFT 22
2139static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val) 2161static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 61979d458ac0..33dcc606c7c5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -4,16 +4,16 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48) 13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05) 16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
17 17
18Copyright (C) 2013 by the following authors: 18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark) 19- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 94c13f418e75..259ad709b0cc 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -4,16 +4,16 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48) 13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05) 16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
17 17
18Copyright (C) 2013 by the following authors: 18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark) 19- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 6f8396be431d..6d4c62bf70dc 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -4,13 +4,13 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index aefc1b8feae9..d1df38bf5747 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -4,13 +4,13 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index a225e8170b2a..0030a111302d 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -4,13 +4,13 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index f5fa4865e059..4e939f82918c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -4,13 +4,13 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index bee36363bcd0..dbde4f6339b9 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -4,13 +4,13 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
index bbeeebe2db55..9908ffe1c3ad 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
@@ -4,13 +4,13 @@
4/* Autogenerated file, DO NOT EDIT manually! 4/* Autogenerated file, DO NOT EDIT manually!
5 5
6This file was generated by the rules-ng-ng headergen tool in this git repository: 6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng 7http://github.com/freedreno/envytools/
8git clone git://0x04.net/rules-ng-ng 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
@@ -42,28 +42,28 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/ 42*/
43 43
44 44
45enum mpd4_bpc { 45enum mdp4_bpc {
46 BPC1 = 0, 46 BPC1 = 0,
47 BPC5 = 1, 47 BPC5 = 1,
48 BPC6 = 2, 48 BPC6 = 2,
49 BPC8 = 3, 49 BPC8 = 3,
50}; 50};
51 51
52enum mpd4_bpc_alpha { 52enum mdp4_bpc_alpha {
53 BPC1A = 0, 53 BPC1A = 0,
54 BPC4A = 1, 54 BPC4A = 1,
55 BPC6A = 2, 55 BPC6A = 2,
56 BPC8A = 3, 56 BPC8A = 3,
57}; 57};
58 58
59enum mpd4_alpha_type { 59enum mdp4_alpha_type {
60 FG_CONST = 0, 60 FG_CONST = 0,
61 BG_CONST = 1, 61 BG_CONST = 1,
62 FG_PIXEL = 2, 62 FG_PIXEL = 2,
63 BG_PIXEL = 3, 63 BG_PIXEL = 3,
64}; 64};
65 65
66enum mpd4_pipe { 66enum mdp4_pipe {
67 VG1 = 0, 67 VG1 = 0,
68 VG2 = 1, 68 VG2 = 1,
69 RGB1 = 2, 69 RGB1 = 2,
@@ -73,13 +73,13 @@ enum mpd4_pipe {
73 VG4 = 6, 73 VG4 = 6,
74}; 74};
75 75
76enum mpd4_mixer { 76enum mdp4_mixer {
77 MIXER0 = 0, 77 MIXER0 = 0,
78 MIXER1 = 1, 78 MIXER1 = 1,
79 MIXER2 = 2, 79 MIXER2 = 2,
80}; 80};
81 81
82enum mpd4_mixer_stage_id { 82enum mdp4_mixer_stage_id {
83 STAGE_UNUSED = 0, 83 STAGE_UNUSED = 0,
84 STAGE_BASE = 1, 84 STAGE_BASE = 1,
85 STAGE0 = 2, 85 STAGE0 = 2,
@@ -194,56 +194,56 @@ static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
194#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0 194#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
195#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007 195#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
196#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0 196#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
197static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val) 197static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
198{ 198{
199 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK; 199 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
200} 200}
201#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008 201#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
202#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070 202#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
203#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4 203#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
204static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val) 204static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
205{ 205{
206 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK; 206 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
207} 207}
208#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080 208#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
209#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700 209#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
210#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8 210#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
211static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val) 211static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
212{ 212{
213 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK; 213 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
214} 214}
215#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800 215#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
216#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000 216#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
217#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12 217#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
218static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val) 218static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
219{ 219{
220 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK; 220 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
221} 221}
222#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000 222#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
223#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000 223#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
224#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16 224#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
225static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val) 225static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
226{ 226{
227 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK; 227 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
228} 228}
229#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000 229#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
230#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000 230#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
231#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20 231#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
232static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val) 232static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
233{ 233{
234 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK; 234 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
235} 235}
236#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000 236#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
237#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000 237#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
238#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24 238#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
239static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val) 239static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
240{ 240{
241 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK; 241 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
242} 242}
243#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000 243#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
244#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000 244#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
245#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28 245#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
246static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val) 246static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
247{ 247{
248 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK; 248 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
249} 249}
@@ -254,56 +254,56 @@ static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id va
254#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100 254#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
255#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007 255#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
256#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0 256#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
257static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val) 257static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
258{ 258{
259 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK; 259 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
260} 260}
261#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008 261#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
262#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070 262#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
263#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4 263#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
264static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val) 264static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
265{ 265{
266 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK; 266 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
267} 267}
268#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080 268#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
269#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700 269#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
270#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8 270#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
271static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val) 271static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
272{ 272{
273 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK; 273 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
274} 274}
275#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800 275#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
276#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000 276#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
277#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12 277#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
278static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val) 278static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
279{ 279{
280 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK; 280 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
281} 281}
282#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000 282#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
283#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000 283#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
284#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16 284#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
285static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val) 285static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
286{ 286{
287 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK; 287 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
288} 288}
289#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000 289#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
290#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000 290#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
291#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20 291#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
292static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val) 292static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
293{ 293{
294 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK; 294 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
295} 295}
296#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000 296#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
297#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000 297#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
298#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24 298#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
299static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val) 299static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
300{ 300{
301 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK; 301 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
302} 302}
303#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000 303#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
304#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000 304#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
305#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28 305#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
306static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val) 306static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
307{ 307{
308 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK; 308 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
309} 309}
@@ -369,7 +369,7 @@ static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x
369static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } 369static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
370#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003 370#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
371#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0 371#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
372static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val) 372static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val)
373{ 373{
374 return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK; 374 return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
375} 375}
@@ -377,7 +377,7 @@ static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val)
377#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008 377#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
378#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030 378#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
379#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4 379#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
380static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mpd4_alpha_type val) 380static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp4_alpha_type val)
381{ 381{
382 return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK; 382 return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
383} 383}
@@ -472,19 +472,19 @@ static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __of
472static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } 472static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
473#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003 473#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
474#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0 474#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
475static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mpd4_bpc val) 475static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp4_bpc val)
476{ 476{
477 return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK; 477 return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
478} 478}
479#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c 479#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
480#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2 480#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
481static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mpd4_bpc val) 481static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp4_bpc val)
482{ 482{
483 return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK; 483 return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
484} 484}
485#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030 485#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
486#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4 486#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
487static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mpd4_bpc val) 487static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp4_bpc val)
488{ 488{
489 return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK; 489 return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
490} 490}
@@ -601,9 +601,9 @@ static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) {
601 601
602static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; } 602static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
603 603
604static inline uint32_t REG_MDP4_PIPE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; } 604static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
605 605
606static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; } 606static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
607#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 607#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
608#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 608#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
609static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val) 609static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
@@ -617,7 +617,7 @@ static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
617 return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK; 617 return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
618} 618}
619 619
620static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mpd4_pipe i0) { return 0x00020004 + 0x10000*i0; } 620static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; }
621#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000 621#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000
622#define MDP4_PIPE_SRC_XY_Y__SHIFT 16 622#define MDP4_PIPE_SRC_XY_Y__SHIFT 16
623static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val) 623static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
@@ -631,7 +631,7 @@ static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
631 return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK; 631 return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
632} 632}
633 633
634static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mpd4_pipe i0) { return 0x00020008 + 0x10000*i0; } 634static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; }
635#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000 635#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000
636#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16 636#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16
637static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val) 637static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
@@ -645,7 +645,7 @@ static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
645 return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK; 645 return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
646} 646}
647 647
648static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mpd4_pipe i0) { return 0x0002000c + 0x10000*i0; } 648static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; }
649#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000 649#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000
650#define MDP4_PIPE_DST_XY_Y__SHIFT 16 650#define MDP4_PIPE_DST_XY_Y__SHIFT 16
651static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val) 651static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
@@ -659,13 +659,13 @@ static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
659 return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK; 659 return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
660} 660}
661 661
662static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mpd4_pipe i0) { return 0x00020010 + 0x10000*i0; } 662static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; }
663 663
664static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mpd4_pipe i0) { return 0x00020014 + 0x10000*i0; } 664static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; }
665 665
666static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mpd4_pipe i0) { return 0x00020018 + 0x10000*i0; } 666static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; }
667 667
668static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mpd4_pipe i0) { return 0x00020040 + 0x10000*i0; } 668static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; }
669#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff 669#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
670#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0 670#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
671static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val) 671static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
@@ -679,7 +679,7 @@ static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
679 return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK; 679 return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
680} 680}
681 681
682static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mpd4_pipe i0) { return 0x00020044 + 0x10000*i0; } 682static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; }
683#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff 683#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
684#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0 684#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0
685static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val) 685static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
@@ -693,7 +693,7 @@ static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
693 return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK; 693 return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
694} 694}
695 695
696static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mpd4_pipe i0) { return 0x00020048 + 0x10000*i0; } 696static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; }
697#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000 697#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
698#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16 698#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16
699static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val) 699static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
@@ -707,28 +707,28 @@ static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
707 return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK; 707 return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
708} 708}
709 709
710static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mpd4_pipe i0) { return 0x00020050 + 0x10000*i0; } 710static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
711#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 711#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
712#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 712#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
713static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mpd4_bpc val) 713static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp4_bpc val)
714{ 714{
715 return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK; 715 return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
716} 716}
717#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c 717#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
718#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 718#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
719static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mpd4_bpc val) 719static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp4_bpc val)
720{ 720{
721 return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK; 721 return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
722} 722}
723#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 723#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
724#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 724#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
725static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mpd4_bpc val) 725static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp4_bpc val)
726{ 726{
727 return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK; 727 return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
728} 728}
729#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 729#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
730#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 730#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
731static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mpd4_bpc_alpha val) 731static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp4_bpc_alpha val)
732{ 732{
733 return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK; 733 return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
734} 734}
@@ -750,7 +750,7 @@ static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
750#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 750#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
751#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000 751#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000
752 752
753static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mpd4_pipe i0) { return 0x00020054 + 0x10000*i0; } 753static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; }
754#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff 754#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
755#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0 755#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
756static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val) 756static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
@@ -776,7 +776,7 @@ static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
776 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK; 776 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
777} 777}
778 778
779static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020058 + 0x10000*i0; } 779static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; }
780#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001 780#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
781#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002 781#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
782#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200 782#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
@@ -789,36 +789,36 @@ static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020
789#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000 789#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000
790#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000 790#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000
791 791
792static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mpd4_pipe i0) { return 0x0002005c + 0x10000*i0; } 792static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; }
793 793
794static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mpd4_pipe i0) { return 0x00020060 + 0x10000*i0; } 794static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; }
795 795
796static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mpd4_pipe i0) { return 0x00021004 + 0x10000*i0; } 796static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; }
797 797
798static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mpd4_pipe i0) { return 0x00021008 + 0x10000*i0; } 798static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; }
799 799
800static inline uint32_t REG_MDP4_PIPE_CSC(enum mpd4_pipe i0) { return 0x00024000 + 0x10000*i0; } 800static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; }
801 801
802 802
803static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; } 803static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
804 804
805static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; } 805static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
806 806
807static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; } 807static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
808 808
809static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; } 809static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
810 810
811static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; } 811static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
812 812
813static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; } 813static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
814 814
815static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; } 815static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
816 816
817static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; } 817static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
818 818
819static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; } 819static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
820 820
821static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; } 821static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
822 822
823#define REG_MDP4_LCDC 0x000c0000 823#define REG_MDP4_LCDC 0x000c0000
824 824
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
index de6bea297cda..019d530187ff 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
@@ -26,6 +26,7 @@ struct mdp4_crtc {
26 struct drm_crtc base; 26 struct drm_crtc base;
27 char name[8]; 27 char name[8];
28 struct drm_plane *plane; 28 struct drm_plane *plane;
29 struct drm_plane *planes[8];
29 int id; 30 int id;
30 int ovlp; 31 int ovlp;
31 enum mdp4_dma dma; 32 enum mdp4_dma dma;
@@ -50,7 +51,11 @@ struct mdp4_crtc {
50 51
51 /* if there is a pending flip, these will be non-null: */ 52 /* if there is a pending flip, these will be non-null: */
52 struct drm_pending_vblank_event *event; 53 struct drm_pending_vblank_event *event;
53 struct work_struct pageflip_work; 54 struct msm_fence_cb pageflip_cb;
55
56#define PENDING_CURSOR 0x1
57#define PENDING_FLIP 0x2
58 atomic_t pending;
54 59
55 /* the fb that we currently hold a scanout ref to: */ 60 /* the fb that we currently hold a scanout ref to: */
56 struct drm_framebuffer *fb; 61 struct drm_framebuffer *fb;
@@ -92,7 +97,8 @@ static void update_fb(struct drm_crtc *crtc, bool async,
92 } 97 }
93} 98}
94 99
95static void complete_flip(struct drm_crtc *crtc, bool canceled) 100/* if file!=NULL, this is preclose potential cancel-flip path */
101static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
96{ 102{
97 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 103 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
98 struct drm_device *dev = crtc->dev; 104 struct drm_device *dev = crtc->dev;
@@ -102,11 +108,14 @@ static void complete_flip(struct drm_crtc *crtc, bool canceled)
102 spin_lock_irqsave(&dev->event_lock, flags); 108 spin_lock_irqsave(&dev->event_lock, flags);
103 event = mdp4_crtc->event; 109 event = mdp4_crtc->event;
104 if (event) { 110 if (event) {
105 mdp4_crtc->event = NULL; 111 /* if regular vblank case (!file) or if cancel-flip from
106 if (canceled) 112 * preclose on file that requested flip, then send the
107 event->base.destroy(&event->base); 113 * event:
108 else 114 */
115 if (!file || (event->base.file_priv == file)) {
116 mdp4_crtc->event = NULL;
109 drm_send_vblank_event(dev, mdp4_crtc->id, event); 117 drm_send_vblank_event(dev, mdp4_crtc->id, event);
118 }
110 } 119 }
111 spin_unlock_irqrestore(&dev->event_lock, flags); 120 spin_unlock_irqrestore(&dev->event_lock, flags);
112} 121}
@@ -115,9 +124,15 @@ static void crtc_flush(struct drm_crtc *crtc)
115{ 124{
116 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 125 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
117 struct mdp4_kms *mdp4_kms = get_kms(crtc); 126 struct mdp4_kms *mdp4_kms = get_kms(crtc);
118 uint32_t flush = 0; 127 uint32_t i, flush = 0;
119 128
120 flush |= pipe2flush(mdp4_plane_pipe(mdp4_crtc->plane)); 129 for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
130 struct drm_plane *plane = mdp4_crtc->planes[i];
131 if (plane) {
132 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
133 flush |= pipe2flush(pipe_id);
134 }
135 }
121 flush |= ovlp2flush(mdp4_crtc->ovlp); 136 flush |= ovlp2flush(mdp4_crtc->ovlp);
122 137
123 DBG("%s: flush=%08x", mdp4_crtc->name, flush); 138 DBG("%s: flush=%08x", mdp4_crtc->name, flush);
@@ -125,17 +140,29 @@ static void crtc_flush(struct drm_crtc *crtc)
125 mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); 140 mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
126} 141}
127 142
128static void pageflip_worker(struct work_struct *work) 143static void request_pending(struct drm_crtc *crtc, uint32_t pending)
144{
145 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
146
147 atomic_or(pending, &mdp4_crtc->pending);
148 mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
149}
150
151static void pageflip_cb(struct msm_fence_cb *cb)
129{ 152{
130 struct mdp4_crtc *mdp4_crtc = 153 struct mdp4_crtc *mdp4_crtc =
131 container_of(work, struct mdp4_crtc, pageflip_work); 154 container_of(cb, struct mdp4_crtc, pageflip_cb);
132 struct drm_crtc *crtc = &mdp4_crtc->base; 155 struct drm_crtc *crtc = &mdp4_crtc->base;
156 struct drm_framebuffer *fb = crtc->fb;
133 157
134 mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb); 158 if (!fb)
159 return;
160
161 mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
135 crtc_flush(crtc); 162 crtc_flush(crtc);
136 163
137 /* enable vblank to complete flip: */ 164 /* enable vblank to complete flip: */
138 mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank); 165 request_pending(crtc, PENDING_FLIP);
139} 166}
140 167
141static void unref_fb_worker(struct drm_flip_work *work, void *val) 168static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -205,67 +232,69 @@ static void blend_setup(struct drm_crtc *crtc)
205 struct mdp4_kms *mdp4_kms = get_kms(crtc); 232 struct mdp4_kms *mdp4_kms = get_kms(crtc);
206 int i, ovlp = mdp4_crtc->ovlp; 233 int i, ovlp = mdp4_crtc->ovlp;
207 uint32_t mixer_cfg = 0; 234 uint32_t mixer_cfg = 0;
208 235 static const enum mdp4_mixer_stage_id stages[] = {
209 /* 236 STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
210 * This probably would also need to be triggered by any attached 237 };
211 * plane when it changes.. for now since we are only using a single 238 /* statically (for now) map planes to mixer stage (z-order): */
212 * private plane, the configuration is hard-coded: 239 static const int idxs[] = {
213 */ 240 [VG1] = 1,
241 [VG2] = 2,
242 [RGB1] = 0,
243 [RGB2] = 0,
244 [RGB3] = 0,
245 [VG3] = 3,
246 [VG4] = 4,
247
248 };
249 bool alpha[4]= { false, false, false, false };
214 250
215 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0); 251 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
216 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0); 252 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
217 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0); 253 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
218 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0); 254 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
219 255
256 /* TODO single register for all CRTCs, so this won't work properly
257 * when multiple CRTCs are active..
258 */
259 for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
260 struct drm_plane *plane = mdp4_crtc->planes[i];
261 if (plane) {
262 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
263 int idx = idxs[pipe_id];
264 if (idx > 0) {
265 const struct mdp4_format *format =
266 to_mdp4_format(msm_framebuffer_format(plane->fb));
267 alpha[idx-1] = format->alpha_enable;
268 }
269 mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]);
270 }
271 }
272
273 /* this shouldn't happen.. and seems to cause underflow: */
274 WARN_ON(!mixer_cfg);
275
220 for (i = 0; i < 4; i++) { 276 for (i = 0; i < 4; i++) {
221 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0); 277 uint32_t op;
222 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0); 278
223 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), 279 if (alpha[i]) {
224 MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) | 280 op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
225 MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST)); 281 MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
226 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 0); 282 MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
283 } else {
284 op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
285 MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
286 }
287
288 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
289 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
290 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
291 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
227 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0); 292 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
228 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0); 293 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
229 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0); 294 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
230 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0); 295 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
231 } 296 }
232 297
233 /* TODO single register for all CRTCs, so this won't work properly
234 * when multiple CRTCs are active..
235 */
236 switch (mdp4_plane_pipe(mdp4_crtc->plane)) {
237 case VG1:
238 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(STAGE_BASE) |
239 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
240 break;
241 case VG2:
242 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(STAGE_BASE) |
243 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
244 break;
245 case RGB1:
246 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(STAGE_BASE) |
247 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
248 break;
249 case RGB2:
250 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(STAGE_BASE) |
251 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
252 break;
253 case RGB3:
254 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(STAGE_BASE) |
255 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
256 break;
257 case VG3:
258 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(STAGE_BASE) |
259 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
260 break;
261 case VG4:
262 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(STAGE_BASE) |
263 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
264 break;
265 default:
266 WARN_ON("invalid pipe");
267 break;
268 }
269 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg); 298 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
270} 299}
271 300
@@ -377,6 +406,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
377 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 406 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
378 struct drm_device *dev = crtc->dev; 407 struct drm_device *dev = crtc->dev;
379 struct drm_gem_object *obj; 408 struct drm_gem_object *obj;
409 unsigned long flags;
380 410
381 if (mdp4_crtc->event) { 411 if (mdp4_crtc->event) {
382 dev_err(dev->dev, "already pending flip!\n"); 412 dev_err(dev->dev, "already pending flip!\n");
@@ -385,11 +415,13 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
385 415
386 obj = msm_framebuffer_bo(new_fb, 0); 416 obj = msm_framebuffer_bo(new_fb, 0);
387 417
418 spin_lock_irqsave(&dev->event_lock, flags);
388 mdp4_crtc->event = event; 419 mdp4_crtc->event = event;
420 spin_unlock_irqrestore(&dev->event_lock, flags);
421
389 update_fb(crtc, true, new_fb); 422 update_fb(crtc, true, new_fb);
390 423
391 return msm_gem_queue_inactive_work(obj, 424 return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
392 &mdp4_crtc->pageflip_work);
393} 425}
394 426
395static int mdp4_crtc_set_property(struct drm_crtc *crtc, 427static int mdp4_crtc_set_property(struct drm_crtc *crtc,
@@ -498,6 +530,8 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
498 drm_gem_object_unreference_unlocked(old_bo); 530 drm_gem_object_unreference_unlocked(old_bo);
499 } 531 }
500 532
533 request_pending(crtc, PENDING_CURSOR);
534
501 return 0; 535 return 0;
502 536
503fail: 537fail:
@@ -542,13 +576,21 @@ static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
542 struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank); 576 struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
543 struct drm_crtc *crtc = &mdp4_crtc->base; 577 struct drm_crtc *crtc = &mdp4_crtc->base;
544 struct msm_drm_private *priv = crtc->dev->dev_private; 578 struct msm_drm_private *priv = crtc->dev->dev_private;
579 unsigned pending;
545 580
546 update_cursor(crtc);
547 complete_flip(crtc, false);
548 mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank); 581 mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
549 582
550 drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq); 583 pending = atomic_xchg(&mdp4_crtc->pending, 0);
551 drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq); 584
585 if (pending & PENDING_FLIP) {
586 complete_flip(crtc, NULL);
587 drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
588 }
589
590 if (pending & PENDING_CURSOR) {
591 update_cursor(crtc);
592 drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
593 }
552} 594}
553 595
554static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus) 596static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
@@ -565,9 +607,10 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
565 return mdp4_crtc->vblank.irqmask; 607 return mdp4_crtc->vblank.irqmask;
566} 608}
567 609
568void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc) 610void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
569{ 611{
570 complete_flip(crtc, true); 612 DBG("cancel: %p", file);
613 complete_flip(crtc, file);
571} 614}
572 615
573/* set dma config, ie. the format the encoder wants. */ 616/* set dma config, ie. the format the encoder wants. */
@@ -622,6 +665,32 @@ void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
622 mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel); 665 mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
623} 666}
624 667
668static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
669 struct drm_plane *plane)
670{
671 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
672
673 BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
674
675 if (mdp4_crtc->planes[pipe_id] == plane)
676 return;
677
678 mdp4_crtc->planes[pipe_id] = plane;
679 blend_setup(crtc);
680 if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
681 crtc_flush(crtc);
682}
683
684void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
685{
686 set_attach(crtc, mdp4_plane_pipe(plane), plane);
687}
688
689void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
690{
691 set_attach(crtc, mdp4_plane_pipe(plane), NULL);
692}
693
625static const char *dma_names[] = { 694static const char *dma_names[] = {
626 "DMA_P", "DMA_S", "DMA_E", 695 "DMA_P", "DMA_S", "DMA_E",
627}; 696};
@@ -644,7 +713,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
644 crtc = &mdp4_crtc->base; 713 crtc = &mdp4_crtc->base;
645 714
646 mdp4_crtc->plane = plane; 715 mdp4_crtc->plane = plane;
647 mdp4_crtc->plane->crtc = crtc;
648 716
649 mdp4_crtc->ovlp = ovlp_id; 717 mdp4_crtc->ovlp = ovlp_id;
650 mdp4_crtc->dma = dma_id; 718 mdp4_crtc->dma = dma_id;
@@ -668,7 +736,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
668 ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64, 736 ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
669 "unref cursor", unref_cursor_worker); 737 "unref cursor", unref_cursor_worker);
670 738
671 INIT_WORK(&mdp4_crtc->pageflip_work, pageflip_worker); 739 INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
672 740
673 drm_crtc_init(dev, crtc, &mdp4_crtc_funcs); 741 drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
674 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); 742 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
index 7b645f2e837a..17330b0927b2 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_format.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
@@ -44,6 +44,22 @@ static const struct mdp4_format formats[] = {
44 FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3), 44 FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3),
45}; 45};
46 46
47uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
48 uint32_t max_formats)
49{
50 uint32_t i;
51 for (i = 0; i < ARRAY_SIZE(formats); i++) {
52 const struct mdp4_format *f = &formats[i];
53
54 if (i == max_formats)
55 break;
56
57 pixel_formats[i] = f->base.pixel_format;
58 }
59
60 return i;
61}
62
47const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format) 63const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
48{ 64{
49 int i; 65 int i;
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
index bc7fd11ad8be..8972ac35a43d 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -135,7 +135,7 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
135 unsigned i; 135 unsigned i;
136 136
137 for (i = 0; i < priv->num_crtcs; i++) 137 for (i = 0; i < priv->num_crtcs; i++)
138 mdp4_crtc_cancel_pending_flip(priv->crtcs[i]); 138 mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
139} 139}
140 140
141static void mdp4_destroy(struct msm_kms *kms) 141static void mdp4_destroy(struct msm_kms *kms)
@@ -196,6 +196,23 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
196 * for more than just RGB1->DMA_E->DTV->HDMI 196 * for more than just RGB1->DMA_E->DTV->HDMI
197 */ 197 */
198 198
199 /* construct non-private planes: */
200 plane = mdp4_plane_init(dev, VG1, false);
201 if (IS_ERR(plane)) {
202 dev_err(dev->dev, "failed to construct plane for VG1\n");
203 ret = PTR_ERR(plane);
204 goto fail;
205 }
206 priv->planes[priv->num_planes++] = plane;
207
208 plane = mdp4_plane_init(dev, VG2, false);
209 if (IS_ERR(plane)) {
210 dev_err(dev->dev, "failed to construct plane for VG2\n");
211 ret = PTR_ERR(plane);
212 goto fail;
213 }
214 priv->planes[priv->num_planes++] = plane;
215
199 /* the CRTCs get constructed with a private plane: */ 216 /* the CRTCs get constructed with a private plane: */
200 plane = mdp4_plane_init(dev, RGB1, true); 217 plane = mdp4_plane_init(dev, RGB1, true);
201 if (IS_ERR(plane)) { 218 if (IS_ERR(plane)) {
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
index 1e83554955f3..eb015c834087 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
@@ -75,8 +75,8 @@ struct mdp4_platform_config {
75 75
76struct mdp4_format { 76struct mdp4_format {
77 struct msm_format base; 77 struct msm_format base;
78 enum mpd4_bpc bpc_r, bpc_g, bpc_b; 78 enum mdp4_bpc bpc_r, bpc_g, bpc_b;
79 enum mpd4_bpc_alpha bpc_a; 79 enum mdp4_bpc_alpha bpc_a;
80 uint8_t unpack[4]; 80 uint8_t unpack[4];
81 bool alpha_enable, unpack_tight; 81 bool alpha_enable, unpack_tight;
82 uint8_t cpp, unpack_count; 82 uint8_t cpp, unpack_count;
@@ -93,7 +93,7 @@ static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
93 return msm_readl(mdp4_kms->mmio + reg); 93 return msm_readl(mdp4_kms->mmio + reg);
94} 94}
95 95
96static inline uint32_t pipe2flush(enum mpd4_pipe pipe) 96static inline uint32_t pipe2flush(enum mdp4_pipe pipe)
97{ 97{
98 switch (pipe) { 98 switch (pipe) {
99 case VG1: return MDP4_OVERLAY_FLUSH_VG1; 99 case VG1: return MDP4_OVERLAY_FLUSH_VG1;
@@ -133,6 +133,48 @@ static inline uint32_t dma2err(enum mdp4_dma dma)
133 } 133 }
134} 134}
135 135
136static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
137 enum mdp4_mixer_stage_id stage)
138{
139 uint32_t mixer_cfg = 0;
140
141 switch (pipe) {
142 case VG1:
143 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) |
144 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
145 break;
146 case VG2:
147 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) |
148 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
149 break;
150 case RGB1:
151 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) |
152 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
153 break;
154 case RGB2:
155 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) |
156 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
157 break;
158 case RGB3:
159 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) |
160 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
161 break;
162 case VG3:
163 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) |
164 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
165 break;
166 case VG4:
167 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) |
168 COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
169 break;
170 default:
171 WARN_ON("invalid pipe");
172 break;
173 }
174
175 return mixer_cfg;
176}
177
136int mdp4_disable(struct mdp4_kms *mdp4_kms); 178int mdp4_disable(struct mdp4_kms *mdp4_kms);
137int mdp4_enable(struct mdp4_kms *mdp4_kms); 179int mdp4_enable(struct mdp4_kms *mdp4_kms);
138 180
@@ -146,6 +188,8 @@ void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
146int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); 188int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
147void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); 189void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
148 190
191uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *formats,
192 uint32_t max_formats);
149const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format); 193const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);
150 194
151void mdp4_plane_install_properties(struct drm_plane *plane, 195void mdp4_plane_install_properties(struct drm_plane *plane,
@@ -158,14 +202,16 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
158 unsigned int crtc_w, unsigned int crtc_h, 202 unsigned int crtc_w, unsigned int crtc_h,
159 uint32_t src_x, uint32_t src_y, 203 uint32_t src_x, uint32_t src_y,
160 uint32_t src_w, uint32_t src_h); 204 uint32_t src_w, uint32_t src_h);
161enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane); 205enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
162struct drm_plane *mdp4_plane_init(struct drm_device *dev, 206struct drm_plane *mdp4_plane_init(struct drm_device *dev,
163 enum mpd4_pipe pipe_id, bool private_plane); 207 enum mdp4_pipe pipe_id, bool private_plane);
164 208
165uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc); 209uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
166void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc); 210void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
167void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config); 211void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
168void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf); 212void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
213void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
214void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
169struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, 215struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
170 struct drm_plane *plane, int id, int ovlp_id, 216 struct drm_plane *plane, int id, int ovlp_id,
171 enum mdp4_dma dma_id); 217 enum mdp4_dma dma_id);
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
index 3468229d58b3..0f0af243f6fc 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
@@ -22,7 +22,7 @@ struct mdp4_plane {
22 struct drm_plane base; 22 struct drm_plane base;
23 const char *name; 23 const char *name;
24 24
25 enum mpd4_pipe pipe; 25 enum mdp4_pipe pipe;
26 26
27 uint32_t nformats; 27 uint32_t nformats;
28 uint32_t formats[32]; 28 uint32_t formats[32];
@@ -61,7 +61,9 @@ static int mdp4_plane_update(struct drm_plane *plane,
61static int mdp4_plane_disable(struct drm_plane *plane) 61static int mdp4_plane_disable(struct drm_plane *plane)
62{ 62{
63 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 63 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
64 DBG("%s: TODO", mdp4_plane->name); // XXX 64 DBG("%s: disable", mdp4_plane->name);
65 if (plane->crtc)
66 mdp4_crtc_detach(plane->crtc, plane);
65 return 0; 67 return 0;
66} 68}
67 69
@@ -101,7 +103,7 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
101{ 103{
102 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 104 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
103 struct mdp4_kms *mdp4_kms = get_kms(plane); 105 struct mdp4_kms *mdp4_kms = get_kms(plane);
104 enum mpd4_pipe pipe = mdp4_plane->pipe; 106 enum mdp4_pipe pipe = mdp4_plane->pipe;
105 uint32_t iova; 107 uint32_t iova;
106 108
107 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe), 109 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
@@ -129,7 +131,7 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
129{ 131{
130 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 132 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
131 struct mdp4_kms *mdp4_kms = get_kms(plane); 133 struct mdp4_kms *mdp4_kms = get_kms(plane);
132 enum mpd4_pipe pipe = mdp4_plane->pipe; 134 enum mdp4_pipe pipe = mdp4_plane->pipe;
133 const struct mdp4_format *format; 135 const struct mdp4_format *format;
134 uint32_t op_mode = 0; 136 uint32_t op_mode = 0;
135 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; 137 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
@@ -141,6 +143,10 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
141 src_w = src_w >> 16; 143 src_w = src_w >> 16;
142 src_h = src_h >> 16; 144 src_h = src_h >> 16;
143 145
146 DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp4_plane->name,
147 fb->base.id, src_x, src_y, src_w, src_h,
148 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
149
144 if (src_w != crtc_w) { 150 if (src_w != crtc_w) {
145 op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN; 151 op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
146 /* TODO calc phasex_step */ 152 /* TODO calc phasex_step */
@@ -191,7 +197,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
191 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step); 197 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
192 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step); 198 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
193 199
194 plane->crtc = crtc; 200 /* TODO detach from old crtc (if we had more than one) */
201 mdp4_crtc_attach(crtc, plane);
195 202
196 return 0; 203 return 0;
197} 204}
@@ -202,7 +209,7 @@ static const char *pipe_names[] = {
202 "VG3", "VG4", 209 "VG3", "VG4",
203}; 210};
204 211
205enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane) 212enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
206{ 213{
207 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 214 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
208 return mdp4_plane->pipe; 215 return mdp4_plane->pipe;
@@ -210,9 +217,8 @@ enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane)
210 217
211/* initialize plane */ 218/* initialize plane */
212struct drm_plane *mdp4_plane_init(struct drm_device *dev, 219struct drm_plane *mdp4_plane_init(struct drm_device *dev,
213 enum mpd4_pipe pipe_id, bool private_plane) 220 enum mdp4_pipe pipe_id, bool private_plane)
214{ 221{
215 struct msm_drm_private *priv = dev->dev_private;
216 struct drm_plane *plane = NULL; 222 struct drm_plane *plane = NULL;
217 struct mdp4_plane *mdp4_plane; 223 struct mdp4_plane *mdp4_plane;
218 int ret; 224 int ret;
@@ -228,8 +234,12 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
228 mdp4_plane->pipe = pipe_id; 234 mdp4_plane->pipe = pipe_id;
229 mdp4_plane->name = pipe_names[pipe_id]; 235 mdp4_plane->name = pipe_names[pipe_id];
230 236
231 drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &mdp4_plane_funcs, 237 mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
232 mdp4_plane->formats, mdp4_plane->nformats, private_plane); 238 ARRAY_SIZE(mdp4_plane->formats));
239
240 drm_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
241 mdp4_plane->formats, mdp4_plane->nformats,
242 private_plane);
233 243
234 mdp4_plane_install_properties(plane, &plane->base); 244 mdp4_plane_install_properties(plane, &plane->base);
235 245
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b3a2f1629041..86537692e45c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -187,6 +187,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
187 init_waitqueue_head(&priv->fence_event); 187 init_waitqueue_head(&priv->fence_event);
188 188
189 INIT_LIST_HEAD(&priv->inactive_list); 189 INIT_LIST_HEAD(&priv->inactive_list);
190 INIT_LIST_HEAD(&priv->fence_cbs);
190 191
191 drm_mode_config_init(dev); 192 drm_mode_config_init(dev);
192 193
@@ -539,15 +540,36 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
539 return ret; 540 return ret;
540} 541}
541 542
542/* call under struct_mutex */ 543/* called from workqueue */
543void msm_update_fence(struct drm_device *dev, uint32_t fence) 544void msm_update_fence(struct drm_device *dev, uint32_t fence)
544{ 545{
545 struct msm_drm_private *priv = dev->dev_private; 546 struct msm_drm_private *priv = dev->dev_private;
546 547
547 if (fence > priv->completed_fence) { 548 mutex_lock(&dev->struct_mutex);
548 priv->completed_fence = fence; 549 priv->completed_fence = max(fence, priv->completed_fence);
549 wake_up_all(&priv->fence_event); 550
551 while (!list_empty(&priv->fence_cbs)) {
552 struct msm_fence_cb *cb;
553
554 cb = list_first_entry(&priv->fence_cbs,
555 struct msm_fence_cb, work.entry);
556
557 if (cb->fence > priv->completed_fence)
558 break;
559
560 list_del_init(&cb->work.entry);
561 queue_work(priv->wq, &cb->work);
550 } 562 }
563
564 mutex_unlock(&dev->struct_mutex);
565
566 wake_up_all(&priv->fence_event);
567}
568
569void __msm_fence_worker(struct work_struct *work)
570{
571 struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
572 cb->func(cb);
551} 573}
552 574
553/* 575/*
@@ -650,13 +672,13 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
650} 672}
651 673
652static const struct drm_ioctl_desc msm_ioctls[] = { 674static const struct drm_ioctl_desc msm_ioctls[] = {
653 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH), 675 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
654 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH), 676 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
655 DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH), 677 DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
656 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH), 678 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
657 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH), 679 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
658 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH), 680 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
659 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH), 681 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
660}; 682};
661 683
662static const struct vm_operations_struct vm_ops = { 684static const struct vm_operations_struct vm_ops = {
@@ -680,7 +702,11 @@ static const struct file_operations fops = {
680}; 702};
681 703
682static struct drm_driver msm_driver = { 704static struct drm_driver msm_driver = {
683 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET, 705 .driver_features = DRIVER_HAVE_IRQ |
706 DRIVER_GEM |
707 DRIVER_PRIME |
708 DRIVER_RENDER |
709 DRIVER_MODESET,
684 .load = msm_load, 710 .load = msm_load,
685 .unload = msm_unload, 711 .unload = msm_unload,
686 .open = msm_open, 712 .open = msm_open,
@@ -698,6 +724,16 @@ static struct drm_driver msm_driver = {
698 .dumb_create = msm_gem_dumb_create, 724 .dumb_create = msm_gem_dumb_create,
699 .dumb_map_offset = msm_gem_dumb_map_offset, 725 .dumb_map_offset = msm_gem_dumb_map_offset,
700 .dumb_destroy = drm_gem_dumb_destroy, 726 .dumb_destroy = drm_gem_dumb_destroy,
727 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
728 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
729 .gem_prime_export = drm_gem_prime_export,
730 .gem_prime_import = drm_gem_prime_import,
731 .gem_prime_pin = msm_gem_prime_pin,
732 .gem_prime_unpin = msm_gem_prime_unpin,
733 .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
734 .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
735 .gem_prime_vmap = msm_gem_prime_vmap,
736 .gem_prime_vunmap = msm_gem_prime_vunmap,
701#ifdef CONFIG_DEBUG_FS 737#ifdef CONFIG_DEBUG_FS
702 .debugfs_init = msm_debugfs_init, 738 .debugfs_init = msm_debugfs_init,
703 .debugfs_cleanup = msm_debugfs_cleanup, 739 .debugfs_cleanup = msm_debugfs_cleanup,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index df8f1d084bc1..d39f0862b19e 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -73,10 +73,16 @@ struct msm_drm_private {
73 73
74 struct workqueue_struct *wq; 74 struct workqueue_struct *wq;
75 75
76 /* callbacks deferred until bo is inactive: */
77 struct list_head fence_cbs;
78
76 /* registered IOMMU domains: */ 79 /* registered IOMMU domains: */
77 unsigned int num_iommus; 80 unsigned int num_iommus;
78 struct iommu_domain *iommus[NUM_DOMAINS]; 81 struct iommu_domain *iommus[NUM_DOMAINS];
79 82
83 unsigned int num_planes;
84 struct drm_plane *planes[8];
85
80 unsigned int num_crtcs; 86 unsigned int num_crtcs;
81 struct drm_crtc *crtcs[8]; 87 struct drm_crtc *crtcs[8];
82 88
@@ -94,6 +100,20 @@ struct msm_format {
94 uint32_t pixel_format; 100 uint32_t pixel_format;
95}; 101};
96 102
103/* callback from wq once fence has passed: */
104struct msm_fence_cb {
105 struct work_struct work;
106 uint32_t fence;
107 void (*func)(struct msm_fence_cb *cb);
108};
109
110void __msm_fence_worker(struct work_struct *work);
111
112#define INIT_FENCE_CB(_cb, _func) do { \
113 INIT_WORK(&(_cb)->work, __msm_fence_worker); \
114 (_cb)->func = _func; \
115 } while (0)
116
97/* As there are different display controller blocks depending on the 117/* As there are different display controller blocks depending on the
98 * snapdragon version, the kms support is split out and the appropriate 118 * snapdragon version, the kms support is split out and the appropriate
99 * implementation is loaded at runtime. The kms module is responsible 119 * implementation is loaded at runtime. The kms module is responsible
@@ -141,17 +161,24 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
141int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, 161int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
142 uint32_t *iova); 162 uint32_t *iova);
143int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova); 163int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
164struct page **msm_gem_get_pages(struct drm_gem_object *obj);
165void msm_gem_put_pages(struct drm_gem_object *obj);
144void msm_gem_put_iova(struct drm_gem_object *obj, int id); 166void msm_gem_put_iova(struct drm_gem_object *obj, int id);
145int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 167int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
146 struct drm_mode_create_dumb *args); 168 struct drm_mode_create_dumb *args);
147int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
148 uint32_t handle);
149int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 169int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
150 uint32_t handle, uint64_t *offset); 170 uint32_t handle, uint64_t *offset);
171struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
172void *msm_gem_prime_vmap(struct drm_gem_object *obj);
173void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
174struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
175 size_t size, struct sg_table *sg);
176int msm_gem_prime_pin(struct drm_gem_object *obj);
177void msm_gem_prime_unpin(struct drm_gem_object *obj);
151void *msm_gem_vaddr_locked(struct drm_gem_object *obj); 178void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
152void *msm_gem_vaddr(struct drm_gem_object *obj); 179void *msm_gem_vaddr(struct drm_gem_object *obj);
153int msm_gem_queue_inactive_work(struct drm_gem_object *obj, 180int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
154 struct work_struct *work); 181 struct msm_fence_cb *cb);
155void msm_gem_move_to_active(struct drm_gem_object *obj, 182void msm_gem_move_to_active(struct drm_gem_object *obj,
156 struct msm_gpu *gpu, bool write, uint32_t fence); 183 struct msm_gpu *gpu, bool write, uint32_t fence);
157void msm_gem_move_to_inactive(struct drm_gem_object *obj); 184void msm_gem_move_to_inactive(struct drm_gem_object *obj);
@@ -163,6 +190,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
163 uint32_t size, uint32_t flags, uint32_t *handle); 190 uint32_t size, uint32_t flags, uint32_t *handle);
164struct drm_gem_object *msm_gem_new(struct drm_device *dev, 191struct drm_gem_object *msm_gem_new(struct drm_device *dev,
165 uint32_t size, uint32_t flags); 192 uint32_t size, uint32_t flags);
193struct drm_gem_object *msm_gem_import(struct drm_device *dev,
194 uint32_t size, struct sg_table *sgt);
166 195
167struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); 196struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
168const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); 197const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 2bae46c66a30..e587d251c590 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -17,6 +17,7 @@
17 17
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/shmem_fs.h> 19#include <linux/shmem_fs.h>
20#include <linux/dma-buf.h>
20 21
21#include "msm_drv.h" 22#include "msm_drv.h"
22#include "msm_gem.h" 23#include "msm_gem.h"
@@ -77,6 +78,21 @@ static void put_pages(struct drm_gem_object *obj)
77 } 78 }
78} 79}
79 80
81struct page **msm_gem_get_pages(struct drm_gem_object *obj)
82{
83 struct drm_device *dev = obj->dev;
84 struct page **p;
85 mutex_lock(&dev->struct_mutex);
86 p = get_pages(obj);
87 mutex_unlock(&dev->struct_mutex);
88 return p;
89}
90
91void msm_gem_put_pages(struct drm_gem_object *obj)
92{
93 /* when we start tracking the pin count, then do something here */
94}
95
80int msm_gem_mmap_obj(struct drm_gem_object *obj, 96int msm_gem_mmap_obj(struct drm_gem_object *obj,
81 struct vm_area_struct *vma) 97 struct vm_area_struct *vma)
82{ 98{
@@ -162,6 +178,11 @@ out:
162 case 0: 178 case 0:
163 case -ERESTARTSYS: 179 case -ERESTARTSYS:
164 case -EINTR: 180 case -EINTR:
181 case -EBUSY:
182 /*
183 * EBUSY is ok: this just means that another thread
184 * already did the job.
185 */
165 return VM_FAULT_NOPAGE; 186 return VM_FAULT_NOPAGE;
166 case -ENOMEM: 187 case -ENOMEM:
167 return VM_FAULT_OOM; 188 return VM_FAULT_OOM;
@@ -293,7 +314,17 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
293 314
294int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova) 315int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
295{ 316{
317 struct msm_gem_object *msm_obj = to_msm_bo(obj);
296 int ret; 318 int ret;
319
320 /* this is safe right now because we don't unmap until the
321 * bo is deleted:
322 */
323 if (msm_obj->domain[id].iova) {
324 *iova = msm_obj->domain[id].iova;
325 return 0;
326 }
327
297 mutex_lock(&obj->dev->struct_mutex); 328 mutex_lock(&obj->dev->struct_mutex);
298 ret = msm_gem_get_iova_locked(obj, id, iova); 329 ret = msm_gem_get_iova_locked(obj, id, iova);
299 mutex_unlock(&obj->dev->struct_mutex); 330 mutex_unlock(&obj->dev->struct_mutex);
@@ -363,8 +394,11 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
363 return ret; 394 return ret;
364} 395}
365 396
366int msm_gem_queue_inactive_work(struct drm_gem_object *obj, 397/* setup callback for when bo is no longer busy..
367 struct work_struct *work) 398 * TODO probably want to differentiate read vs write..
399 */
400int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
401 struct msm_fence_cb *cb)
368{ 402{
369 struct drm_device *dev = obj->dev; 403 struct drm_device *dev = obj->dev;
370 struct msm_drm_private *priv = dev->dev_private; 404 struct msm_drm_private *priv = dev->dev_private;
@@ -372,12 +406,13 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
372 int ret = 0; 406 int ret = 0;
373 407
374 mutex_lock(&dev->struct_mutex); 408 mutex_lock(&dev->struct_mutex);
375 if (!list_empty(&work->entry)) { 409 if (!list_empty(&cb->work.entry)) {
376 ret = -EINVAL; 410 ret = -EINVAL;
377 } else if (is_active(msm_obj)) { 411 } else if (is_active(msm_obj)) {
378 list_add_tail(&work->entry, &msm_obj->inactive_work); 412 cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
413 list_add_tail(&cb->work.entry, &priv->fence_cbs);
379 } else { 414 } else {
380 queue_work(priv->wq, work); 415 queue_work(priv->wq, &cb->work);
381 } 416 }
382 mutex_unlock(&dev->struct_mutex); 417 mutex_unlock(&dev->struct_mutex);
383 418
@@ -410,16 +445,6 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
410 msm_obj->write_fence = 0; 445 msm_obj->write_fence = 0;
411 list_del_init(&msm_obj->mm_list); 446 list_del_init(&msm_obj->mm_list);
412 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); 447 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
413
414 while (!list_empty(&msm_obj->inactive_work)) {
415 struct work_struct *work;
416
417 work = list_first_entry(&msm_obj->inactive_work,
418 struct work_struct, entry);
419
420 list_del_init(&work->entry);
421 queue_work(priv->wq, work);
422 }
423} 448}
424 449
425int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, 450int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
@@ -510,10 +535,21 @@ void msm_gem_free_object(struct drm_gem_object *obj)
510 535
511 drm_gem_free_mmap_offset(obj); 536 drm_gem_free_mmap_offset(obj);
512 537
513 if (msm_obj->vaddr) 538 if (obj->import_attach) {
514 vunmap(msm_obj->vaddr); 539 if (msm_obj->vaddr)
540 dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
515 541
516 put_pages(obj); 542 /* Don't drop the pages for imported dmabuf, as they are not
543 * ours, just free the array we allocated:
544 */
545 if (msm_obj->pages)
546 drm_free_large(msm_obj->pages);
547
548 } else {
549 if (msm_obj->vaddr)
550 vunmap(msm_obj->vaddr);
551 put_pages(obj);
552 }
517 553
518 if (msm_obj->resv == &msm_obj->_resv) 554 if (msm_obj->resv == &msm_obj->_resv)
519 reservation_object_fini(msm_obj->resv); 555 reservation_object_fini(msm_obj->resv);
@@ -549,17 +585,12 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
549 return ret; 585 return ret;
550} 586}
551 587
552struct drm_gem_object *msm_gem_new(struct drm_device *dev, 588static int msm_gem_new_impl(struct drm_device *dev,
553 uint32_t size, uint32_t flags) 589 uint32_t size, uint32_t flags,
590 struct drm_gem_object **obj)
554{ 591{
555 struct msm_drm_private *priv = dev->dev_private; 592 struct msm_drm_private *priv = dev->dev_private;
556 struct msm_gem_object *msm_obj; 593 struct msm_gem_object *msm_obj;
557 struct drm_gem_object *obj = NULL;
558 int ret;
559
560 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
561
562 size = PAGE_ALIGN(size);
563 594
564 switch (flags & MSM_BO_CACHE_MASK) { 595 switch (flags & MSM_BO_CACHE_MASK) {
565 case MSM_BO_UNCACHED: 596 case MSM_BO_UNCACHED:
@@ -569,21 +600,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
569 default: 600 default:
570 dev_err(dev->dev, "invalid cache flag: %x\n", 601 dev_err(dev->dev, "invalid cache flag: %x\n",
571 (flags & MSM_BO_CACHE_MASK)); 602 (flags & MSM_BO_CACHE_MASK));
572 ret = -EINVAL; 603 return -EINVAL;
573 goto fail;
574 } 604 }
575 605
576 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); 606 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
577 if (!msm_obj) { 607 if (!msm_obj)
578 ret = -ENOMEM; 608 return -ENOMEM;
579 goto fail;
580 }
581
582 obj = &msm_obj->base;
583
584 ret = drm_gem_object_init(dev, obj, size);
585 if (ret)
586 goto fail;
587 609
588 msm_obj->flags = flags; 610 msm_obj->flags = flags;
589 611
@@ -591,9 +613,69 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
591 reservation_object_init(msm_obj->resv); 613 reservation_object_init(msm_obj->resv);
592 614
593 INIT_LIST_HEAD(&msm_obj->submit_entry); 615 INIT_LIST_HEAD(&msm_obj->submit_entry);
594 INIT_LIST_HEAD(&msm_obj->inactive_work);
595 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); 616 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
596 617
618 *obj = &msm_obj->base;
619
620 return 0;
621}
622
623struct drm_gem_object *msm_gem_new(struct drm_device *dev,
624 uint32_t size, uint32_t flags)
625{
626 struct drm_gem_object *obj;
627 int ret;
628
629 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
630
631 size = PAGE_ALIGN(size);
632
633 ret = msm_gem_new_impl(dev, size, flags, &obj);
634 if (ret)
635 goto fail;
636
637 ret = drm_gem_object_init(dev, obj, size);
638 if (ret)
639 goto fail;
640
641 return obj;
642
643fail:
644 if (obj)
645 drm_gem_object_unreference_unlocked(obj);
646
647 return ERR_PTR(ret);
648}
649
650struct drm_gem_object *msm_gem_import(struct drm_device *dev,
651 uint32_t size, struct sg_table *sgt)
652{
653 struct msm_gem_object *msm_obj;
654 struct drm_gem_object *obj;
655 int ret, npages;
656
657 size = PAGE_ALIGN(size);
658
659 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
660 if (ret)
661 goto fail;
662
663 drm_gem_private_object_init(dev, obj, size);
664
665 npages = size / PAGE_SIZE;
666
667 msm_obj = to_msm_bo(obj);
668 msm_obj->sgt = sgt;
669 msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
670 if (!msm_obj->pages) {
671 ret = -ENOMEM;
672 goto fail;
673 }
674
675 ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
676 if (ret)
677 goto fail;
678
597 return obj; 679 return obj;
598 680
599fail: 681fail:
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 0676f32e2c6a..f4f23a578d9d 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -45,9 +45,6 @@ struct msm_gem_object {
45 */ 45 */
46 struct list_head submit_entry; 46 struct list_head submit_entry;
47 47
48 /* work defered until bo is inactive: */
49 struct list_head inactive_work;
50
51 struct page **pages; 48 struct page **pages;
52 struct sg_table *sgt; 49 struct sg_table *sgt;
53 void *vaddr; 50 void *vaddr;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
new file mode 100644
index 000000000000..d48f9fc5129b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_gem.h"
20
21
22struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
23{
24 struct msm_gem_object *msm_obj = to_msm_bo(obj);
25 BUG_ON(!msm_obj->sgt); /* should have already pinned! */
26 return msm_obj->sgt;
27}
28
29void *msm_gem_prime_vmap(struct drm_gem_object *obj)
30{
31 return msm_gem_vaddr(obj);
32}
33
34void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
35{
36 /* TODO msm_gem_vunmap() */
37}
38
39struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
40 size_t size, struct sg_table *sg)
41{
42 return msm_gem_import(dev, size, sg);
43}
44
45int msm_gem_prime_pin(struct drm_gem_object *obj)
46{
47 if (!obj->import_attach)
48 msm_gem_get_pages(obj);
49 return 0;
50}
51
52void msm_gem_prime_unpin(struct drm_gem_object *obj)
53{
54 if (!obj->import_attach)
55 msm_gem_put_pages(obj);
56}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 3bab937965d1..4583d61556f5 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -268,6 +268,8 @@ static void retire_worker(struct work_struct *work)
268 struct drm_device *dev = gpu->dev; 268 struct drm_device *dev = gpu->dev;
269 uint32_t fence = gpu->funcs->last_fence(gpu); 269 uint32_t fence = gpu->funcs->last_fence(gpu);
270 270
271 msm_update_fence(gpu->dev, fence);
272
271 mutex_lock(&dev->struct_mutex); 273 mutex_lock(&dev->struct_mutex);
272 274
273 while (!list_empty(&gpu->active_list)) { 275 while (!list_empty(&gpu->active_list)) {
@@ -287,8 +289,6 @@ static void retire_worker(struct work_struct *work)
287 } 289 }
288 } 290 }
289 291
290 msm_update_fence(gpu->dev, fence);
291
292 mutex_unlock(&dev->struct_mutex); 292 mutex_unlock(&dev->struct_mutex);
293} 293}
294 294
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index d939a1da3203..edcf801613e6 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -28,7 +28,9 @@ nouveau-y += core/subdev/bar/nv50.o
28nouveau-y += core/subdev/bar/nvc0.o 28nouveau-y += core/subdev/bar/nvc0.o
29nouveau-y += core/subdev/bios/base.o 29nouveau-y += core/subdev/bios/base.o
30nouveau-y += core/subdev/bios/bit.o 30nouveau-y += core/subdev/bios/bit.o
31nouveau-y += core/subdev/bios/boost.o
31nouveau-y += core/subdev/bios/conn.o 32nouveau-y += core/subdev/bios/conn.o
33nouveau-y += core/subdev/bios/cstep.o
32nouveau-y += core/subdev/bios/dcb.o 34nouveau-y += core/subdev/bios/dcb.o
33nouveau-y += core/subdev/bios/disp.o 35nouveau-y += core/subdev/bios/disp.o
34nouveau-y += core/subdev/bios/dp.o 36nouveau-y += core/subdev/bios/dp.o
@@ -39,17 +41,26 @@ nouveau-y += core/subdev/bios/init.o
39nouveau-y += core/subdev/bios/mxm.o 41nouveau-y += core/subdev/bios/mxm.o
40nouveau-y += core/subdev/bios/perf.o 42nouveau-y += core/subdev/bios/perf.o
41nouveau-y += core/subdev/bios/pll.o 43nouveau-y += core/subdev/bios/pll.o
44nouveau-y += core/subdev/bios/rammap.o
45nouveau-y += core/subdev/bios/timing.o
42nouveau-y += core/subdev/bios/therm.o 46nouveau-y += core/subdev/bios/therm.o
47nouveau-y += core/subdev/bios/vmap.o
48nouveau-y += core/subdev/bios/volt.o
43nouveau-y += core/subdev/bios/xpio.o 49nouveau-y += core/subdev/bios/xpio.o
50nouveau-y += core/subdev/bus/hwsq.o
44nouveau-y += core/subdev/bus/nv04.o 51nouveau-y += core/subdev/bus/nv04.o
45nouveau-y += core/subdev/bus/nv31.o 52nouveau-y += core/subdev/bus/nv31.o
46nouveau-y += core/subdev/bus/nv50.o 53nouveau-y += core/subdev/bus/nv50.o
54nouveau-y += core/subdev/bus/nv94.o
47nouveau-y += core/subdev/bus/nvc0.o 55nouveau-y += core/subdev/bus/nvc0.o
56nouveau-y += core/subdev/clock/base.o
48nouveau-y += core/subdev/clock/nv04.o 57nouveau-y += core/subdev/clock/nv04.o
49nouveau-y += core/subdev/clock/nv40.o 58nouveau-y += core/subdev/clock/nv40.o
50nouveau-y += core/subdev/clock/nv50.o 59nouveau-y += core/subdev/clock/nv50.o
60nouveau-y += core/subdev/clock/nv84.o
51nouveau-y += core/subdev/clock/nva3.o 61nouveau-y += core/subdev/clock/nva3.o
52nouveau-y += core/subdev/clock/nvc0.o 62nouveau-y += core/subdev/clock/nvc0.o
63nouveau-y += core/subdev/clock/nve0.o
53nouveau-y += core/subdev/clock/pllnv04.o 64nouveau-y += core/subdev/clock/pllnv04.o
54nouveau-y += core/subdev/clock/pllnva3.o 65nouveau-y += core/subdev/clock/pllnva3.o
55nouveau-y += core/subdev/devinit/base.o 66nouveau-y += core/subdev/devinit/base.o
@@ -78,7 +89,12 @@ nouveau-y += core/subdev/fb/nv47.o
78nouveau-y += core/subdev/fb/nv49.o 89nouveau-y += core/subdev/fb/nv49.o
79nouveau-y += core/subdev/fb/nv4e.o 90nouveau-y += core/subdev/fb/nv4e.o
80nouveau-y += core/subdev/fb/nv50.o 91nouveau-y += core/subdev/fb/nv50.o
92nouveau-y += core/subdev/fb/nv84.o
93nouveau-y += core/subdev/fb/nva3.o
94nouveau-y += core/subdev/fb/nvaa.o
95nouveau-y += core/subdev/fb/nvaf.o
81nouveau-y += core/subdev/fb/nvc0.o 96nouveau-y += core/subdev/fb/nvc0.o
97nouveau-y += core/subdev/fb/nve0.o
82nouveau-y += core/subdev/fb/ramnv04.o 98nouveau-y += core/subdev/fb/ramnv04.o
83nouveau-y += core/subdev/fb/ramnv10.o 99nouveau-y += core/subdev/fb/ramnv10.o
84nouveau-y += core/subdev/fb/ramnv1a.o 100nouveau-y += core/subdev/fb/ramnv1a.o
@@ -89,7 +105,12 @@ nouveau-y += core/subdev/fb/ramnv44.o
89nouveau-y += core/subdev/fb/ramnv49.o 105nouveau-y += core/subdev/fb/ramnv49.o
90nouveau-y += core/subdev/fb/ramnv4e.o 106nouveau-y += core/subdev/fb/ramnv4e.o
91nouveau-y += core/subdev/fb/ramnv50.o 107nouveau-y += core/subdev/fb/ramnv50.o
108nouveau-y += core/subdev/fb/ramnva3.o
109nouveau-y += core/subdev/fb/ramnvaa.o
92nouveau-y += core/subdev/fb/ramnvc0.o 110nouveau-y += core/subdev/fb/ramnvc0.o
111nouveau-y += core/subdev/fb/ramnve0.o
112nouveau-y += core/subdev/fb/sddr3.o
113nouveau-y += core/subdev/fb/gddr5.o
93nouveau-y += core/subdev/gpio/base.o 114nouveau-y += core/subdev/gpio/base.o
94nouveau-y += core/subdev/gpio/nv10.o 115nouveau-y += core/subdev/gpio/nv10.o
95nouveau-y += core/subdev/gpio/nv50.o 116nouveau-y += core/subdev/gpio/nv50.o
@@ -113,13 +134,22 @@ nouveau-y += core/subdev/instmem/nv50.o
113nouveau-y += core/subdev/ltcg/nvc0.o 134nouveau-y += core/subdev/ltcg/nvc0.o
114nouveau-y += core/subdev/mc/base.o 135nouveau-y += core/subdev/mc/base.o
115nouveau-y += core/subdev/mc/nv04.o 136nouveau-y += core/subdev/mc/nv04.o
137nouveau-y += core/subdev/mc/nv40.o
116nouveau-y += core/subdev/mc/nv44.o 138nouveau-y += core/subdev/mc/nv44.o
117nouveau-y += core/subdev/mc/nv50.o 139nouveau-y += core/subdev/mc/nv50.o
140nouveau-y += core/subdev/mc/nv94.o
118nouveau-y += core/subdev/mc/nv98.o 141nouveau-y += core/subdev/mc/nv98.o
119nouveau-y += core/subdev/mc/nvc0.o 142nouveau-y += core/subdev/mc/nvc0.o
143nouveau-y += core/subdev/mc/nvc3.o
120nouveau-y += core/subdev/mxm/base.o 144nouveau-y += core/subdev/mxm/base.o
121nouveau-y += core/subdev/mxm/mxms.o 145nouveau-y += core/subdev/mxm/mxms.o
122nouveau-y += core/subdev/mxm/nv50.o 146nouveau-y += core/subdev/mxm/nv50.o
147nouveau-y += core/subdev/pwr/base.o
148nouveau-y += core/subdev/pwr/memx.o
149nouveau-y += core/subdev/pwr/nva3.o
150nouveau-y += core/subdev/pwr/nvc0.o
151nouveau-y += core/subdev/pwr/nvd0.o
152nouveau-y += core/subdev/pwr/nv108.o
123nouveau-y += core/subdev/therm/base.o 153nouveau-y += core/subdev/therm/base.o
124nouveau-y += core/subdev/therm/fan.o 154nouveau-y += core/subdev/therm/fan.o
125nouveau-y += core/subdev/therm/fannil.o 155nouveau-y += core/subdev/therm/fannil.o
@@ -140,6 +170,9 @@ nouveau-y += core/subdev/vm/nv41.o
140nouveau-y += core/subdev/vm/nv44.o 170nouveau-y += core/subdev/vm/nv44.o
141nouveau-y += core/subdev/vm/nv50.o 171nouveau-y += core/subdev/vm/nv50.o
142nouveau-y += core/subdev/vm/nvc0.o 172nouveau-y += core/subdev/vm/nvc0.o
173nouveau-y += core/subdev/volt/base.o
174nouveau-y += core/subdev/volt/gpio.o
175nouveau-y += core/subdev/volt/nv40.o
143 176
144nouveau-y += core/engine/falcon.o 177nouveau-y += core/engine/falcon.o
145nouveau-y += core/engine/xtensa.o 178nouveau-y += core/engine/xtensa.o
@@ -158,6 +191,7 @@ nouveau-y += core/engine/copy/nve0.o
158nouveau-y += core/engine/crypt/nv84.o 191nouveau-y += core/engine/crypt/nv84.o
159nouveau-y += core/engine/crypt/nv98.o 192nouveau-y += core/engine/crypt/nv98.o
160nouveau-y += core/engine/device/base.o 193nouveau-y += core/engine/device/base.o
194nouveau-y += core/engine/device/ctrl.o
161nouveau-y += core/engine/device/nv04.o 195nouveau-y += core/engine/device/nv04.o
162nouveau-y += core/engine/device/nv10.o 196nouveau-y += core/engine/device/nv10.o
163nouveau-y += core/engine/device/nv20.o 197nouveau-y += core/engine/device/nv20.o
@@ -227,8 +261,18 @@ nouveau-y += core/engine/graph/nve4.o
227nouveau-y += core/engine/graph/nvf0.o 261nouveau-y += core/engine/graph/nvf0.o
228nouveau-y += core/engine/mpeg/nv31.o 262nouveau-y += core/engine/mpeg/nv31.o
229nouveau-y += core/engine/mpeg/nv40.o 263nouveau-y += core/engine/mpeg/nv40.o
264nouveau-y += core/engine/mpeg/nv44.o
230nouveau-y += core/engine/mpeg/nv50.o 265nouveau-y += core/engine/mpeg/nv50.o
231nouveau-y += core/engine/mpeg/nv84.o 266nouveau-y += core/engine/mpeg/nv84.o
267nouveau-y += core/engine/perfmon/base.o
268nouveau-y += core/engine/perfmon/daemon.o
269nouveau-y += core/engine/perfmon/nv40.o
270nouveau-y += core/engine/perfmon/nv50.o
271nouveau-y += core/engine/perfmon/nv84.o
272nouveau-y += core/engine/perfmon/nva3.o
273nouveau-y += core/engine/perfmon/nvc0.o
274nouveau-y += core/engine/perfmon/nve0.o
275nouveau-y += core/engine/perfmon/nvf0.o
232nouveau-y += core/engine/ppp/nv98.o 276nouveau-y += core/engine/ppp/nv98.o
233nouveau-y += core/engine/ppp/nvc0.o 277nouveau-y += core/engine/ppp/nvc0.o
234nouveau-y += core/engine/software/nv04.o 278nouveau-y += core/engine/software/nv04.o
@@ -260,9 +304,7 @@ include $(src)/dispnv04/Makefile
260nouveau-y += nv50_display.o 304nouveau-y += nv50_display.o
261 305
262# drm/pm 306# drm/pm
263nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o 307nouveau-y += nouveau_hwmon.o nouveau_sysfs.o
264nouveau-y += nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o
265nouveau-y += nouveau_mem.o
266 308
267# other random bits 309# other random bits
268nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o 310nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index 7eb81c1b6fab..3f3c76581a9e 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -23,62 +23,114 @@
23#include <core/os.h> 23#include <core/os.h>
24#include <core/event.h> 24#include <core/event.h>
25 25
26static void 26void
27nouveau_event_put_locked(struct nouveau_event *event, int index, 27nouveau_event_put(struct nouveau_eventh *handler)
28 struct nouveau_eventh *handler)
29{ 28{
30 if (!--event->index[index].refs) { 29 struct nouveau_event *event = handler->event;
31 if (event->disable) 30 unsigned long flags;
32 event->disable(event, index); 31 if (__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
32 spin_lock_irqsave(&event->refs_lock, flags);
33 if (!--event->index[handler->index].refs) {
34 if (event->disable)
35 event->disable(event, handler->index);
36 }
37 spin_unlock_irqrestore(&event->refs_lock, flags);
33 } 38 }
34 list_del(&handler->head);
35} 39}
36 40
37void 41void
38nouveau_event_put(struct nouveau_event *event, int index, 42nouveau_event_get(struct nouveau_eventh *handler)
39 struct nouveau_eventh *handler)
40{ 43{
44 struct nouveau_event *event = handler->event;
41 unsigned long flags; 45 unsigned long flags;
46 if (!__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
47 spin_lock_irqsave(&event->refs_lock, flags);
48 if (!event->index[handler->index].refs++) {
49 if (event->enable)
50 event->enable(event, handler->index);
51 }
52 spin_unlock_irqrestore(&event->refs_lock, flags);
53 }
54}
42 55
43 spin_lock_irqsave(&event->lock, flags); 56static void
44 if (index < event->index_nr) 57nouveau_event_fini(struct nouveau_eventh *handler)
45 nouveau_event_put_locked(event, index, handler); 58{
46 spin_unlock_irqrestore(&event->lock, flags); 59 struct nouveau_event *event = handler->event;
60 unsigned long flags;
61 nouveau_event_put(handler);
62 spin_lock_irqsave(&event->list_lock, flags);
63 list_del(&handler->head);
64 spin_unlock_irqrestore(&event->list_lock, flags);
47} 65}
48 66
49void 67static int
50nouveau_event_get(struct nouveau_event *event, int index, 68nouveau_event_init(struct nouveau_event *event, int index,
51 struct nouveau_eventh *handler) 69 int (*func)(void *, int), void *priv,
70 struct nouveau_eventh *handler)
52{ 71{
53 unsigned long flags; 72 unsigned long flags;
54 73
55 spin_lock_irqsave(&event->lock, flags); 74 if (index >= event->index_nr)
56 if (index < event->index_nr) { 75 return -EINVAL;
57 list_add(&handler->head, &event->index[index].list); 76
58 if (!event->index[index].refs++) { 77 handler->event = event;
59 if (event->enable) 78 handler->flags = 0;
60 event->enable(event, index); 79 handler->index = index;
61 } 80 handler->func = func;
81 handler->priv = priv;
82
83 spin_lock_irqsave(&event->list_lock, flags);
84 list_add_tail(&handler->head, &event->index[index].list);
85 spin_unlock_irqrestore(&event->list_lock, flags);
86 return 0;
87}
88
89int
90nouveau_event_new(struct nouveau_event *event, int index,
91 int (*func)(void *, int), void *priv,
92 struct nouveau_eventh **phandler)
93{
94 struct nouveau_eventh *handler;
95 int ret = -ENOMEM;
96
97 handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL);
98 if (handler) {
99 ret = nouveau_event_init(event, index, func, priv, handler);
100 if (ret)
101 kfree(handler);
62 } 102 }
63 spin_unlock_irqrestore(&event->lock, flags); 103
104 return ret;
105}
106
107void
108nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref)
109{
110 BUG_ON(handler != NULL);
111 if (*ref) {
112 nouveau_event_fini(*ref);
113 kfree(*ref);
114 }
115 *ref = handler;
64} 116}
65 117
66void 118void
67nouveau_event_trigger(struct nouveau_event *event, int index) 119nouveau_event_trigger(struct nouveau_event *event, int index)
68{ 120{
69 struct nouveau_eventh *handler, *temp; 121 struct nouveau_eventh *handler;
70 unsigned long flags; 122 unsigned long flags;
71 123
72 if (index >= event->index_nr) 124 if (WARN_ON(index >= event->index_nr))
73 return; 125 return;
74 126
75 spin_lock_irqsave(&event->lock, flags); 127 spin_lock_irqsave(&event->list_lock, flags);
76 list_for_each_entry_safe(handler, temp, &event->index[index].list, head) { 128 list_for_each_entry(handler, &event->index[index].list, head) {
77 if (handler->func(handler, index) == NVKM_EVENT_DROP) { 129 if (test_bit(NVKM_EVENT_ENABLE, &handler->flags) &&
78 nouveau_event_put_locked(event, index, handler); 130 handler->func(handler->priv, index) == NVKM_EVENT_DROP)
79 } 131 nouveau_event_put(handler);
80 } 132 }
81 spin_unlock_irqrestore(&event->lock, flags); 133 spin_unlock_irqrestore(&event->list_lock, flags);
82} 134}
83 135
84void 136void
@@ -102,7 +154,8 @@ nouveau_event_create(int index_nr, struct nouveau_event **pevent)
102 if (!event) 154 if (!event)
103 return -ENOMEM; 155 return -ENOMEM;
104 156
105 spin_lock_init(&event->lock); 157 spin_lock_init(&event->list_lock);
158 spin_lock_init(&event->refs_lock);
106 for (i = 0; i < index_nr; i++) 159 for (i = 0; i < index_nr; i++)
107 INIT_LIST_HEAD(&event->index[i].list); 160 INIT_LIST_HEAD(&event->index[i].list);
108 event->index_nr = index_nr; 161 event->index_nr = index_nr;
diff --git a/drivers/gpu/drm/nouveau/core/core/option.c b/drivers/gpu/drm/nouveau/core/core/option.c
index 62a432ea39e5..9f6fcc5f66c2 100644
--- a/drivers/gpu/drm/nouveau/core/core/option.c
+++ b/drivers/gpu/drm/nouveau/core/core/option.c
@@ -25,15 +25,6 @@
25#include <core/option.h> 25#include <core/option.h>
26#include <core/debug.h> 26#include <core/debug.h>
27 27
28/* compares unterminated string 'str' with zero-terminated string 'cmp' */
29static inline int
30strncasecmpz(const char *str, const char *cmp, size_t len)
31{
32 if (strlen(cmp) != len)
33 return len;
34 return strncasecmp(str, cmp, len);
35}
36
37const char * 28const char *
38nouveau_stropt(const char *optstr, const char *opt, int *arglen) 29nouveau_stropt(const char *optstr, const char *opt, int *arglen)
39{ 30{
@@ -105,7 +96,7 @@ nouveau_dbgopt(const char *optstr, const char *sub)
105 else if (!strncasecmpz(optstr, "warn", len)) 96 else if (!strncasecmpz(optstr, "warn", len))
106 level = NV_DBG_WARN; 97 level = NV_DBG_WARN;
107 else if (!strncasecmpz(optstr, "info", len)) 98 else if (!strncasecmpz(optstr, "info", len))
108 level = NV_DBG_INFO; 99 level = NV_DBG_INFO_NORMAL;
109 else if (!strncasecmpz(optstr, "debug", len)) 100 else if (!strncasecmpz(optstr, "debug", len))
110 level = NV_DBG_DEBUG; 101 level = NV_DBG_DEBUG;
111 else if (!strncasecmpz(optstr, "trace", len)) 102 else if (!strncasecmpz(optstr, "trace", len))
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
index 52fb2aa129e8..03e0060b13da 100644
--- a/drivers/gpu/drm/nouveau/core/core/printk.c
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -27,16 +27,38 @@
27#include <core/subdev.h> 27#include <core/subdev.h>
28#include <core/printk.h> 28#include <core/printk.h>
29 29
30int nv_printk_suspend_level = NV_DBG_DEBUG; 30int nv_info_debug_level = NV_DBG_INFO_NORMAL;
31 31
32void 32void
33nv_printk_(struct nouveau_object *object, const char *pfx, int level, 33nv_printk_(struct nouveau_object *object, int level, const char *fmt, ...)
34 const char *fmt, ...)
35{ 34{
36 static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' }; 35 static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
36 const char *pfx;
37 char mfmt[256]; 37 char mfmt[256];
38 va_list args; 38 va_list args;
39 39
40 switch (level) {
41 case NV_DBG_FATAL:
42 pfx = KERN_CRIT;
43 break;
44 case NV_DBG_ERROR:
45 pfx = KERN_ERR;
46 break;
47 case NV_DBG_WARN:
48 pfx = KERN_WARNING;
49 break;
50 case NV_DBG_INFO_NORMAL:
51 pfx = KERN_INFO;
52 break;
53 case NV_DBG_DEBUG:
54 case NV_DBG_PARANOIA:
55 case NV_DBG_TRACE:
56 case NV_DBG_SPAM:
57 default:
58 pfx = KERN_DEBUG;
59 break;
60 }
61
40 if (object && !nv_iclass(object, NV_CLIENT_CLASS)) { 62 if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
41 struct nouveau_object *device = object; 63 struct nouveau_object *device = object;
42 struct nouveau_object *subdev = object; 64 struct nouveau_object *subdev = object;
@@ -74,20 +96,3 @@ nv_printk_(struct nouveau_object *object, const char *pfx, int level,
74 vprintk(mfmt, args); 96 vprintk(mfmt, args);
75 va_end(args); 97 va_end(args);
76} 98}
77
78#define CONV_LEVEL(x) case NV_DBG_##x: return NV_PRINTK_##x
79
80const char *nv_printk_level_to_pfx(int level)
81{
82 switch (level) {
83 CONV_LEVEL(FATAL);
84 CONV_LEVEL(ERROR);
85 CONV_LEVEL(WARN);
86 CONV_LEVEL(INFO);
87 CONV_LEVEL(DEBUG);
88 CONV_LEVEL(PARANOIA);
89 CONV_LEVEL(TRACE);
90 CONV_LEVEL(SPAM);
91 }
92 return NV_PRINTK_DEBUG;
93}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 4c72571655ad..9135b25a29d0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -29,7 +29,7 @@
29 29
30#include <core/class.h> 30#include <core/class.h>
31 31
32#include <engine/device.h> 32#include "priv.h"
33 33
34static DEFINE_MUTEX(nv_devices_mutex); 34static DEFINE_MUTEX(nv_devices_mutex);
35static LIST_HEAD(nv_devices); 35static LIST_HEAD(nv_devices);
@@ -75,7 +75,9 @@ static const u64 disable_map[] = {
75 [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE, 75 [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE,
76 [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE, 76 [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE,
77 [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE, 77 [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE,
78 [NVDEV_SUBDEV_PWR] = NV_DEVICE_DISABLE_CORE,
78 [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE, 79 [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE,
80 [NVDEV_ENGINE_PERFMON] = NV_DEVICE_DISABLE_CORE,
79 [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO, 81 [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
80 [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO, 82 [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO,
81 [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH, 83 [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH,
@@ -87,7 +89,7 @@ static const u64 disable_map[] = {
87 [NVDEV_ENGINE_PPP] = NV_DEVICE_DISABLE_PPP, 89 [NVDEV_ENGINE_PPP] = NV_DEVICE_DISABLE_PPP,
88 [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0, 90 [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0,
89 [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1, 91 [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1,
90 [NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1, 92 [NVDEV_ENGINE_VIC] = NV_DEVICE_DISABLE_VIC,
91 [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC, 93 [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC,
92 [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP, 94 [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP,
93 [NVDEV_SUBDEV_NR] = 0, 95 [NVDEV_SUBDEV_NR] = 0,
@@ -119,10 +121,12 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
119 return -ENODEV; 121 return -ENODEV;
120 } 122 }
121 123
122 ret = nouveau_parent_create(parent, nv_object(device), oclass, 0, NULL, 124 ret = nouveau_parent_create(parent, nv_object(device), oclass, 0,
125 nouveau_control_oclass,
123 (1ULL << NVDEV_ENGINE_DMAOBJ) | 126 (1ULL << NVDEV_ENGINE_DMAOBJ) |
124 (1ULL << NVDEV_ENGINE_FIFO) | 127 (1ULL << NVDEV_ENGINE_FIFO) |
125 (1ULL << NVDEV_ENGINE_DISP), &devobj); 128 (1ULL << NVDEV_ENGINE_DISP) |
129 (1ULL << NVDEV_ENGINE_PERFMON), &devobj);
126 *pobject = nv_object(devobj); 130 *pobject = nv_object(devobj);
127 if (ret) 131 if (ret)
128 return ret; 132 return ret;
@@ -158,22 +162,29 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
158 iounmap(map); 162 iounmap(map);
159 163
160 /* determine chipset and derive architecture from it */ 164 /* determine chipset and derive architecture from it */
161 if ((boot0 & 0x0f000000) > 0) { 165 if ((boot0 & 0x1f000000) > 0) {
162 device->chipset = (boot0 & 0xff00000) >> 20; 166 device->chipset = (boot0 & 0x1ff00000) >> 20;
163 switch (device->chipset & 0xf0) { 167 switch (device->chipset & 0x1f0) {
164 case 0x10: device->card_type = NV_10; break; 168 case 0x010: {
165 case 0x20: device->card_type = NV_20; break; 169 if (0x461 & (1 << (device->chipset & 0xf)))
166 case 0x30: device->card_type = NV_30; break; 170 device->card_type = NV_10;
167 case 0x40: 171 else
168 case 0x60: device->card_type = NV_40; break; 172 device->card_type = NV_11;
169 case 0x50: 173 break;
170 case 0x80: 174 }
171 case 0x90: 175 case 0x020: device->card_type = NV_20; break;
172 case 0xa0: device->card_type = NV_50; break; 176 case 0x030: device->card_type = NV_30; break;
173 case 0xc0: device->card_type = NV_C0; break; 177 case 0x040:
174 case 0xd0: device->card_type = NV_D0; break; 178 case 0x060: device->card_type = NV_40; break;
175 case 0xe0: 179 case 0x050:
176 case 0xf0: device->card_type = NV_E0; break; 180 case 0x080:
181 case 0x090:
182 case 0x0a0: device->card_type = NV_50; break;
183 case 0x0c0: device->card_type = NV_C0; break;
184 case 0x0d0: device->card_type = NV_D0; break;
185 case 0x0e0:
186 case 0x0f0:
187 case 0x100: device->card_type = NV_E0; break;
177 default: 188 default:
178 break; 189 break;
179 } 190 }
@@ -188,7 +199,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
188 199
189 switch (device->card_type) { 200 switch (device->card_type) {
190 case NV_04: ret = nv04_identify(device); break; 201 case NV_04: ret = nv04_identify(device); break;
191 case NV_10: ret = nv10_identify(device); break; 202 case NV_10:
203 case NV_11: ret = nv10_identify(device); break;
192 case NV_20: ret = nv20_identify(device); break; 204 case NV_20: ret = nv20_identify(device); break;
193 case NV_30: ret = nv30_identify(device); break; 205 case NV_30: ret = nv30_identify(device); break;
194 case NV_40: ret = nv40_identify(device); break; 206 case NV_40: ret = nv40_identify(device); break;
@@ -212,7 +224,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
212 nv_info(device, "Family : NV%02X\n", device->card_type); 224 nv_info(device, "Family : NV%02X\n", device->card_type);
213 225
214 /* determine frequency of timing crystal */ 226 /* determine frequency of timing crystal */
215 if ( device->chipset < 0x17 || 227 if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
216 (device->chipset >= 0x20 && device->chipset < 0x25)) 228 (device->chipset >= 0x20 && device->chipset < 0x25))
217 strap &= 0x00000040; 229 strap &= 0x00000040;
218 else 230 else
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
new file mode 100644
index 000000000000..4b69bf56ed01
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
@@ -0,0 +1,144 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <core/object.h>
26#include <core/class.h>
27
28#include <subdev/clock.h>
29
30#include "priv.h"
31
32static int
33nouveau_control_mthd_pstate_info(struct nouveau_object *object, u32 mthd,
34 void *data, u32 size)
35{
36 struct nouveau_clock *clk = nouveau_clock(object);
37 struct nv_control_pstate_info *args = data;
38
39 if (size < sizeof(*args))
40 return -EINVAL;
41
42 if (clk) {
43 args->count = clk->state_nr;
44 args->ustate = clk->ustate;
45 args->pstate = clk->pstate;
46 } else {
47 args->count = 0;
48 args->ustate = NV_CONTROL_PSTATE_INFO_USTATE_DISABLE;
49 args->pstate = NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN;
50 }
51
52 return 0;
53}
54
55static int
56nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
57 void *data, u32 size)
58{
59 struct nouveau_clock *clk = nouveau_clock(object);
60 struct nv_control_pstate_attr *args = data;
61 struct nouveau_clocks *domain;
62 struct nouveau_pstate *pstate;
63 struct nouveau_cstate *cstate;
64 int i = 0, j = -1;
65 u32 lo, hi;
66
67 if ((size < sizeof(*args)) || !clk ||
68 (args->state >= 0 && args->state >= clk->state_nr))
69 return -EINVAL;
70 domain = clk->domains;
71
72 while (domain->name != nv_clk_src_max) {
73 if (domain->mname && ++j == args->index)
74 break;
75 domain++;
76 }
77
78 if (domain->name == nv_clk_src_max)
79 return -EINVAL;
80
81 if (args->state != NV_CONTROL_PSTATE_ATTR_STATE_CURRENT) {
82 list_for_each_entry(pstate, &clk->states, head) {
83 if (i++ == args->state)
84 break;
85 }
86
87 lo = pstate->base.domain[domain->name];
88 hi = lo;
89 list_for_each_entry(cstate, &pstate->list, head) {
90 lo = min(lo, cstate->domain[domain->name]);
91 hi = max(hi, cstate->domain[domain->name]);
92 }
93
94 args->state = pstate->pstate;
95 } else {
96 lo = max(clk->read(clk, domain->name), 0);
97 hi = lo;
98 }
99
100 snprintf(args->name, sizeof(args->name), "%s", domain->mname);
101 snprintf(args->unit, sizeof(args->unit), "MHz");
102 args->min = lo / domain->mdiv;
103 args->max = hi / domain->mdiv;
104
105 args->index = 0;
106 while ((++domain)->name != nv_clk_src_max) {
107 if (domain->mname) {
108 args->index = ++j;
109 break;
110 }
111 }
112
113 return 0;
114}
115
116static int
117nouveau_control_mthd_pstate_user(struct nouveau_object *object, u32 mthd,
118 void *data, u32 size)
119{
120 struct nouveau_clock *clk = nouveau_clock(object);
121 struct nv_control_pstate_user *args = data;
122
123 if (size < sizeof(*args) || !clk)
124 return -EINVAL;
125
126 return nouveau_clock_ustate(clk, args->state);
127}
128
129struct nouveau_oclass
130nouveau_control_oclass[] = {
131 { .handle = NV_CONTROL_CLASS,
132 .ofuncs = &nouveau_object_ofuncs,
133 .omthds = (struct nouveau_omthds[]) {
134 { NV_CONTROL_PSTATE_INFO,
135 NV_CONTROL_PSTATE_INFO, nouveau_control_mthd_pstate_info },
136 { NV_CONTROL_PSTATE_ATTR,
137 NV_CONTROL_PSTATE_ATTR, nouveau_control_mthd_pstate_attr },
138 { NV_CONTROL_PSTATE_USER,
139 NV_CONTROL_PSTATE_USER, nouveau_control_mthd_pstate_user },
140 {},
141 },
142 },
143 {}
144};
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index a0284cf09c0f..dbd2dde7b7e7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -50,15 +50,15 @@ nv04_identify(struct nouveau_device *device)
50 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 50 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
51 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 51 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
52 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass; 52 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
53 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 53 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
54 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 54 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
55 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 55 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
56 device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass; 56 device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
57 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 57 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
58 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 58 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
59 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 59 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
60 device->oclass[NVDEV_ENGINE_FIFO ] = &nv04_fifo_oclass; 60 device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
61 device->oclass[NVDEV_ENGINE_SW ] = &nv04_software_oclass; 61 device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
62 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass; 62 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
63 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 63 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
64 break; 64 break;
@@ -68,15 +68,15 @@ nv04_identify(struct nouveau_device *device)
68 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 68 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
69 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 69 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
70 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass; 70 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
71 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 71 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
72 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 72 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
73 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 73 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
74 device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass; 74 device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
75 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 75 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
76 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 76 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
77 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 77 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
78 device->oclass[NVDEV_ENGINE_FIFO ] = &nv04_fifo_oclass; 78 device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
79 device->oclass[NVDEV_ENGINE_SW ] = &nv04_software_oclass; 79 device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
80 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass; 80 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
81 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 81 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
82 break; 82 break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index 1b7809a095c3..6e03dd6abeea 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -52,10 +52,10 @@ nv10_identify(struct nouveau_device *device)
52 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 52 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
53 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 53 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
54 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; 54 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
55 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 55 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
56 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 56 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
57 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 57 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
58 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 58 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
59 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 59 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
60 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 60 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
61 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 61 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -69,15 +69,15 @@ nv10_identify(struct nouveau_device *device)
69 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 69 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
70 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 70 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
71 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; 71 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
72 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 72 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
73 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 73 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
75 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 75 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
76 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 76 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
79 device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass; 79 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
80 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 80 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
81 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 81 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
82 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 82 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
83 break; 83 break;
@@ -88,15 +88,15 @@ nv10_identify(struct nouveau_device *device)
88 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 88 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
89 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 89 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
90 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; 90 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
91 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 91 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
92 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 92 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
93 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 93 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
94 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 94 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
95 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 95 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
96 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 96 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
97 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 97 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
98 device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass; 98 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
99 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 99 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
100 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 100 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
101 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 101 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
102 break; 102 break;
@@ -107,15 +107,15 @@ nv10_identify(struct nouveau_device *device)
107 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 107 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
108 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 108 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
109 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 109 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
110 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 110 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
111 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 111 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
112 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 112 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
113 device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass; 113 device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
114 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 114 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
115 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 115 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
116 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 116 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
117 device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass; 117 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
118 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 118 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
119 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 119 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
120 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 120 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
121 break; 121 break;
@@ -126,15 +126,15 @@ nv10_identify(struct nouveau_device *device)
126 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 126 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
127 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 127 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
128 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; 128 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
129 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 129 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
130 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 130 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
131 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 131 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
132 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 132 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
133 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 133 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
134 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 134 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
135 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 135 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
136 device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass; 136 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
137 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 137 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
138 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 138 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
139 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 139 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
140 break; 140 break;
@@ -145,15 +145,15 @@ nv10_identify(struct nouveau_device *device)
145 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 145 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
146 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 146 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
147 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; 147 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
148 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 148 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
149 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 149 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
150 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 150 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
151 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 151 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
152 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 152 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
153 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 153 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
154 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 154 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
155 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; 155 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
156 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 156 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
157 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 157 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
158 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 158 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
159 break; 159 break;
@@ -164,15 +164,15 @@ nv10_identify(struct nouveau_device *device)
164 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 164 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
165 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 165 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
166 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 166 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
167 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 167 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
168 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 168 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
169 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 169 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
170 device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass; 170 device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
171 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 171 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
172 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 172 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
173 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 173 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
174 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; 174 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
175 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 175 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
176 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 176 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
177 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 177 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
178 break; 178 break;
@@ -183,15 +183,15 @@ nv10_identify(struct nouveau_device *device)
183 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 183 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
184 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 184 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
185 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; 185 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
186 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 186 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
187 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 187 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
188 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 188 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
189 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 189 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
190 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 190 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
191 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 191 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
192 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 192 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
193 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; 193 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
194 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 194 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
195 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 195 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
196 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 196 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
197 break; 197 break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index 12a4005fa619..dcde53b9f07f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -53,15 +53,15 @@ nv20_identify(struct nouveau_device *device)
53 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 53 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
54 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 54 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
55 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 55 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
56 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 56 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
57 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 57 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
58 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 58 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
59 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; 59 device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass;
60 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 60 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
61 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 61 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
63 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; 63 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
64 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 64 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
65 device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass; 65 device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass;
66 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 66 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
67 break; 67 break;
@@ -72,15 +72,15 @@ nv20_identify(struct nouveau_device *device)
72 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 72 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
73 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 73 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
74 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 74 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
75 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 75 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
76 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 76 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
77 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 77 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
78 device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass; 78 device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
79 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 79 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
80 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 80 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
81 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 81 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
82 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; 82 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
83 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 83 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
84 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass; 84 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
85 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 85 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
86 break; 86 break;
@@ -91,15 +91,15 @@ nv20_identify(struct nouveau_device *device)
91 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 91 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
92 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 92 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
93 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 93 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
94 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 94 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
95 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 95 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
96 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 96 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
97 device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass; 97 device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
98 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 98 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
99 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 99 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
100 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 100 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
101 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; 101 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
102 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 102 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
103 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass; 103 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
104 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 104 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
105 break; 105 break;
@@ -110,15 +110,15 @@ nv20_identify(struct nouveau_device *device)
110 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 110 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
111 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 111 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
112 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 112 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
113 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 113 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
114 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 114 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
115 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 115 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
116 device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass; 116 device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
117 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 117 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
118 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 118 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
119 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 119 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
120 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; 120 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
121 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 121 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
122 device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass; 122 device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass;
123 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 123 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
124 break; 124 break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index cef0f1ea4c21..7b8662ef4f59 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -53,15 +53,15 @@ nv30_identify(struct nouveau_device *device)
53 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 53 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
54 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 54 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
55 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 55 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
56 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 56 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
57 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 57 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
58 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 58 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
59 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; 59 device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
60 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 60 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
61 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 61 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
63 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; 63 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
64 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 64 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
65 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; 65 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
66 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 66 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
67 break; 67 break;
@@ -72,15 +72,15 @@ nv30_identify(struct nouveau_device *device)
72 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 72 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
73 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 73 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
74 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 74 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
75 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 75 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
76 device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; 76 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
77 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 77 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
78 device->oclass[NVDEV_SUBDEV_FB ] = &nv35_fb_oclass; 78 device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass;
79 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 79 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
80 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 80 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
81 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 81 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
82 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; 82 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
83 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 83 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
84 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass; 84 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
85 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 85 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
86 break; 86 break;
@@ -91,15 +91,15 @@ nv30_identify(struct nouveau_device *device)
91 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 91 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
92 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 92 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
93 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 93 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
94 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 94 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
95 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 95 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
96 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 96 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
97 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; 97 device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
98 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 98 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
99 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 99 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
100 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 100 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
101 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; 101 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
102 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 102 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
103 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; 103 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
104 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass; 104 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
105 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 105 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
@@ -111,15 +111,15 @@ nv30_identify(struct nouveau_device *device)
111 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 111 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
112 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 112 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
113 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 113 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
114 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 114 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
115 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 115 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
116 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 116 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
117 device->oclass[NVDEV_SUBDEV_FB ] = &nv36_fb_oclass; 117 device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass;
118 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 118 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
119 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 119 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
120 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 120 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
121 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; 121 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
122 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 122 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
123 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass; 123 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
124 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass; 124 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
125 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 125 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
@@ -131,15 +131,15 @@ nv30_identify(struct nouveau_device *device)
131 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 131 device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
132 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; 132 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
133 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; 133 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
134 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 134 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
135 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 135 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
136 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 136 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
137 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 137 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
138 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 138 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
139 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 139 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
140 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 140 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
141 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; 141 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
142 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 142 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
143 device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass; 143 device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass;
144 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass; 144 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
145 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 145 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 1719cb0ee595..c8c41e93695e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -35,6 +35,7 @@
35#include <subdev/fb.h> 35#include <subdev/fb.h>
36#include <subdev/instmem.h> 36#include <subdev/instmem.h>
37#include <subdev/vm.h> 37#include <subdev/vm.h>
38#include <subdev/volt.h>
38 39
39#include <engine/device.h> 40#include <engine/device.h>
40#include <engine/dmaobj.h> 41#include <engine/dmaobj.h>
@@ -43,6 +44,7 @@
43#include <engine/graph.h> 44#include <engine/graph.h>
44#include <engine/mpeg.h> 45#include <engine/mpeg.h>
45#include <engine/disp.h> 46#include <engine/disp.h>
47#include <engine/perfmon.h>
46 48
47int 49int
48nv40_identify(struct nouveau_device *device) 50nv40_identify(struct nouveau_device *device)
@@ -56,18 +58,20 @@ nv40_identify(struct nouveau_device *device)
56 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 58 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
57 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 59 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
58 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 60 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
59 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 61 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
60 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 62 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
61 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 63 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
62 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 64 device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
63 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 65 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
64 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 66 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
67 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
65 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 68 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
66 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 69 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
67 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 70 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
68 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 71 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
69 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 72 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
70 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 73 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
74 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
71 break; 75 break;
72 case 0x41: 76 case 0x41:
73 device->cname = "NV41"; 77 device->cname = "NV41";
@@ -77,18 +81,20 @@ nv40_identify(struct nouveau_device *device)
77 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 81 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
78 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 82 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
79 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 83 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
80 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 84 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
81 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 85 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
82 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 86 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
83 device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass; 87 device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
84 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 88 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
85 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 89 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
90 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
86 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 91 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
87 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 92 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
88 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 93 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
89 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 94 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
90 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 95 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
91 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 96 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
97 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
92 break; 98 break;
93 case 0x42: 99 case 0x42:
94 device->cname = "NV42"; 100 device->cname = "NV42";
@@ -98,18 +104,20 @@ nv40_identify(struct nouveau_device *device)
98 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 104 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
99 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 105 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
100 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 106 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
101 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 107 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
102 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 108 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
103 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 109 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
104 device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass; 110 device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
105 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 111 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
106 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 112 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
113 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
107 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 114 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
108 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 115 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
109 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 116 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
110 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 117 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
111 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 118 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
112 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 119 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
120 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
113 break; 121 break;
114 case 0x43: 122 case 0x43:
115 device->cname = "NV43"; 123 device->cname = "NV43";
@@ -119,18 +127,20 @@ nv40_identify(struct nouveau_device *device)
119 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 127 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
120 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 128 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
121 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 129 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
122 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 130 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
123 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 131 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
124 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 132 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
125 device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass; 133 device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
126 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 134 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
127 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 135 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
136 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
128 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 137 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
129 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 138 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
130 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 139 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
131 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 140 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
132 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 141 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
133 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 142 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
143 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
134 break; 144 break;
135 case 0x45: 145 case 0x45:
136 device->cname = "NV45"; 146 device->cname = "NV45";
@@ -140,18 +150,20 @@ nv40_identify(struct nouveau_device *device)
140 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 150 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
141 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 151 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
142 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 152 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
143 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 153 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
144 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 154 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
145 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 155 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
146 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 156 device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
147 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 157 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
148 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 158 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
159 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
149 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 160 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
150 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 161 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
151 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 162 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
152 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 163 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
153 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 164 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
154 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 165 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
166 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
155 break; 167 break;
156 case 0x47: 168 case 0x47:
157 device->cname = "G70"; 169 device->cname = "G70";
@@ -161,18 +173,20 @@ nv40_identify(struct nouveau_device *device)
161 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 173 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
162 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 174 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
163 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 175 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
164 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 176 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
165 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 177 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 178 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
167 device->oclass[NVDEV_SUBDEV_FB ] = &nv47_fb_oclass; 179 device->oclass[NVDEV_SUBDEV_FB ] = nv47_fb_oclass;
168 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 180 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
169 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 181 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
182 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
170 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 183 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
171 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 184 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
172 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 185 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
173 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 186 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
174 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 187 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
175 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 188 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
189 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
176 break; 190 break;
177 case 0x49: 191 case 0x49:
178 device->cname = "G71"; 192 device->cname = "G71";
@@ -182,18 +196,20 @@ nv40_identify(struct nouveau_device *device)
182 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 196 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
183 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 197 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
184 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 198 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
185 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 199 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
186 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 200 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
187 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 201 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
188 device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass; 202 device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
189 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 203 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
190 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 204 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
205 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
191 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 206 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
192 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 207 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
193 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 208 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
194 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 209 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
195 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 210 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
196 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 211 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
212 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
197 break; 213 break;
198 case 0x4b: 214 case 0x4b:
199 device->cname = "G73"; 215 device->cname = "G73";
@@ -203,18 +219,20 @@ nv40_identify(struct nouveau_device *device)
203 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 219 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
204 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 220 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
205 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 221 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
206 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 222 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
207 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 223 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
208 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 224 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
209 device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass; 225 device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
210 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 226 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
211 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 227 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
228 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
212 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 229 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
213 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 230 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
214 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 231 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
215 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 232 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
216 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 233 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
217 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 234 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
235 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
218 break; 236 break;
219 case 0x44: 237 case 0x44:
220 device->cname = "NV44"; 238 device->cname = "NV44";
@@ -224,18 +242,20 @@ nv40_identify(struct nouveau_device *device)
224 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 242 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
225 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 243 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
226 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 244 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
227 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 245 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
228 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 246 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
229 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 247 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
230 device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass; 248 device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
231 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 249 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
232 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 250 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
251 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
233 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 252 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
234 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 253 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
235 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 254 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
236 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 255 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
237 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 256 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
238 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 257 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
258 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
239 break; 259 break;
240 case 0x46: 260 case 0x46:
241 device->cname = "G72"; 261 device->cname = "G72";
@@ -245,18 +265,20 @@ nv40_identify(struct nouveau_device *device)
245 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 265 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
246 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 266 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
247 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 267 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
248 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 268 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
249 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 269 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
250 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 270 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
251 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; 271 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
252 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 272 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
253 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 273 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
274 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
254 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 275 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
255 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 276 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
256 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 277 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
257 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 278 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
258 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 279 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
259 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 280 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
281 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
260 break; 282 break;
261 case 0x4a: 283 case 0x4a:
262 device->cname = "NV44A"; 284 device->cname = "NV44A";
@@ -266,18 +288,20 @@ nv40_identify(struct nouveau_device *device)
266 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 288 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
267 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 289 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
268 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 290 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
269 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 291 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
270 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 292 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
271 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 293 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
272 device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass; 294 device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
273 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 295 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
274 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 296 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
297 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
275 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 298 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
276 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 299 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
277 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 300 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
278 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 301 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
279 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 302 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
280 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 303 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
304 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
281 break; 305 break;
282 case 0x4c: 306 case 0x4c:
283 device->cname = "C61"; 307 device->cname = "C61";
@@ -287,18 +311,20 @@ nv40_identify(struct nouveau_device *device)
287 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 311 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
288 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 312 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
289 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 313 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
290 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 314 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
291 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 315 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
292 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 316 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
293 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; 317 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
294 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 318 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
295 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 319 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
320 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
296 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 321 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
297 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 322 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
298 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 323 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
299 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 324 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
300 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 325 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
301 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 326 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
327 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
302 break; 328 break;
303 case 0x4e: 329 case 0x4e:
304 device->cname = "C51"; 330 device->cname = "C51";
@@ -308,18 +334,20 @@ nv40_identify(struct nouveau_device *device)
308 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 334 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
309 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 335 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
310 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 336 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
311 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 337 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
312 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 338 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
313 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 339 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
314 device->oclass[NVDEV_SUBDEV_FB ] = &nv4e_fb_oclass; 340 device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass;
315 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 341 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
316 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 342 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
343 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
317 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 344 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
318 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 345 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
319 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 346 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
320 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 347 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
321 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 348 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
322 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 349 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
350 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
323 break; 351 break;
324 case 0x63: 352 case 0x63:
325 device->cname = "C73"; 353 device->cname = "C73";
@@ -329,18 +357,20 @@ nv40_identify(struct nouveau_device *device)
329 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 357 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
330 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 358 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
331 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 359 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
332 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 360 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
333 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 361 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
334 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 362 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
335 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; 363 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
336 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 364 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
337 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 365 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
366 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
338 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 367 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
339 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 368 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
340 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 369 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
341 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 370 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
342 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 371 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
343 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 372 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
373 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
344 break; 374 break;
345 case 0x67: 375 case 0x67:
346 device->cname = "C67"; 376 device->cname = "C67";
@@ -350,18 +380,20 @@ nv40_identify(struct nouveau_device *device)
350 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 380 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
351 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 381 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
352 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 382 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
353 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 383 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
354 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 384 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
355 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 385 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
356 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; 386 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
357 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 387 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
358 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 388 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
389 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
359 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 390 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
360 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 391 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
361 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 392 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
362 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 393 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
363 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 394 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
364 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 395 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
396 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
365 break; 397 break;
366 case 0x68: 398 case 0x68:
367 device->cname = "C68"; 399 device->cname = "C68";
@@ -371,18 +403,20 @@ nv40_identify(struct nouveau_device *device)
371 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 403 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
372 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 404 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
373 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 405 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
374 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 406 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
375 device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; 407 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
376 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 408 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
377 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; 409 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
378 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 410 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
379 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 411 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
412 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
380 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 413 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
381 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; 414 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
382 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; 415 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
383 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 416 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
384 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; 417 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
385 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; 418 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
419 device->oclass[NVDEV_ENGINE_PERFMON] = nv40_perfmon_oclass;
386 break; 420 break;
387 default: 421 default:
388 nv_fatal(device, "unknown Curie chipset\n"); 422 nv_fatal(device, "unknown Curie chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index ffc18b80c5d9..db139827047c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -36,6 +36,8 @@
36#include <subdev/instmem.h> 36#include <subdev/instmem.h>
37#include <subdev/vm.h> 37#include <subdev/vm.h>
38#include <subdev/bar.h> 38#include <subdev/bar.h>
39#include <subdev/pwr.h>
40#include <subdev/volt.h>
39 41
40#include <engine/device.h> 42#include <engine/device.h>
41#include <engine/dmaobj.h> 43#include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
49#include <engine/ppp.h> 51#include <engine/ppp.h>
50#include <engine/copy.h> 52#include <engine/copy.h>
51#include <engine/disp.h> 53#include <engine/disp.h>
54#include <engine/perfmon.h>
52 55
53int 56int
54nv50_identify(struct nouveau_device *device) 57nv50_identify(struct nouveau_device *device)
@@ -59,257 +62,277 @@ nv50_identify(struct nouveau_device *device)
59 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 62 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
60 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 63 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
61 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; 64 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
62 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 65 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv50_clock_oclass;
63 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; 66 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
64 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 67 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
65 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 68 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
66 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 69 device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
67 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 70 device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
68 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 71 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
69 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 72 device->oclass[NVDEV_SUBDEV_FB ] = nv50_fb_oclass;
70 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 73 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
71 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 74 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
72 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 75 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
76 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
73 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 77 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
74 device->oclass[NVDEV_ENGINE_FIFO ] = &nv50_fifo_oclass; 78 device->oclass[NVDEV_ENGINE_FIFO ] = nv50_fifo_oclass;
75 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 79 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
76 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 80 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
77 device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass; 81 device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass;
78 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 82 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
83 device->oclass[NVDEV_ENGINE_PERFMON] = nv50_perfmon_oclass;
79 break; 84 break;
80 case 0x84: 85 case 0x84:
81 device->cname = "G84"; 86 device->cname = "G84";
82 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 87 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
83 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 88 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
84 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; 89 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
85 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 90 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
86 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 91 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
87 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 92 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
88 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 93 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
89 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 94 device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
90 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 95 device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
91 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 96 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
92 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 97 device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
93 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 98 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
94 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 99 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
95 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 100 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
101 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
96 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 102 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
97 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 103 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
98 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 104 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
99 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 105 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
100 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; 106 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
101 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 107 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
102 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 108 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
103 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 109 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
104 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; 110 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
111 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
105 break; 112 break;
106 case 0x86: 113 case 0x86:
107 device->cname = "G86"; 114 device->cname = "G86";
108 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 115 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
109 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 116 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
110 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; 117 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
111 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 118 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
112 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 119 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
113 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 120 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
114 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 121 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
115 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 122 device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
116 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 123 device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
117 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 124 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
118 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 125 device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
119 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 126 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
120 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 127 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
121 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 128 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
129 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
122 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 130 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
123 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 131 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
124 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 132 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
125 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 133 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
126 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; 134 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
127 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 135 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
128 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 136 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
129 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 137 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
130 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; 138 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
139 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
131 break; 140 break;
132 case 0x92: 141 case 0x92:
133 device->cname = "G92"; 142 device->cname = "G92";
134 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 143 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
135 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 144 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
136 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; 145 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
137 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 146 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
138 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 147 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
139 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 148 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
140 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 149 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
141 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 150 device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
142 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 151 device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
143 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 152 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
144 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 153 device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
145 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 154 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
146 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 155 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
147 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 156 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
157 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
148 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 158 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
149 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 159 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
150 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 160 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
151 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 161 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
152 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; 162 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
153 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 163 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
154 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 164 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
155 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 165 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
156 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; 166 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
167 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
157 break; 168 break;
158 case 0x94: 169 case 0x94:
159 device->cname = "G94"; 170 device->cname = "G94";
160 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 171 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
161 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 172 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
162 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 173 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
163 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 174 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
164 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 175 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
165 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 176 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
166 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 177 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
167 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 178 device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass;
168 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 179 device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
169 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 180 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
170 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 181 device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
171 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 182 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
172 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 183 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
173 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 184 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
185 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
174 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 186 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
175 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 187 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
176 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 188 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
177 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 189 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
178 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; 190 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
179 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 191 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
180 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 192 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
181 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 193 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
182 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; 194 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
195 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
183 break; 196 break;
184 case 0x96: 197 case 0x96:
185 device->cname = "G96"; 198 device->cname = "G96";
186 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 199 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
187 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 200 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
188 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 201 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
189 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 202 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
190 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 203 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
191 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 204 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
192 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 205 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
193 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 206 device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass;
194 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 207 device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
195 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 208 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
196 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 209 device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
197 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 210 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
198 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 211 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
199 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 212 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
213 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
200 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 214 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
201 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 215 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
202 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 216 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
203 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 217 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
204 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; 218 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
205 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 219 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
206 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 220 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
207 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 221 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
208 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; 222 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
223 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
209 break; 224 break;
210 case 0x98: 225 case 0x98:
211 device->cname = "G98"; 226 device->cname = "G98";
212 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 227 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
213 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 228 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
214 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 229 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
215 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 230 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
216 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 231 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
217 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 232 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
218 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 233 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
219 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 234 device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
220 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 235 device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
221 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 236 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
222 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 237 device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
223 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 238 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
224 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 239 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
225 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 240 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
241 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
226 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 242 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
227 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 243 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
228 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 244 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
229 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 245 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
230 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass; 246 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
231 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 247 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
232 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 248 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
233 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 249 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
234 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; 250 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
251 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
235 break; 252 break;
236 case 0xa0: 253 case 0xa0:
237 device->cname = "G200"; 254 device->cname = "G200";
238 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 255 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
239 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 256 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
240 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; 257 device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
241 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 258 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
242 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 259 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
243 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 260 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
244 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 261 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
245 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 262 device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
246 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 263 device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
247 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 264 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
248 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 265 device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
249 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 266 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
250 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 267 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
251 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 268 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
269 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
252 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 270 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
253 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 271 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
254 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 272 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
255 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 273 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
256 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; 274 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
257 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 275 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
258 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 276 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
259 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 277 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
260 device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass; 278 device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass;
279 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
261 break; 280 break;
262 case 0xaa: 281 case 0xaa:
263 device->cname = "MCP77/MCP78"; 282 device->cname = "MCP77/MCP78";
264 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 283 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
265 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 284 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
266 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 285 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
267 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 286 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
268 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 287 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
269 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 288 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
270 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 289 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
271 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 290 device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
272 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 291 device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
273 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 292 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
274 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 293 device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass;
275 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 294 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
276 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 295 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
277 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 296 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
297 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
278 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 298 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
279 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 299 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
280 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 300 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
281 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 301 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
282 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass; 302 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
283 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 303 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
284 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 304 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
285 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 305 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
286 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; 306 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
307 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
287 break; 308 break;
288 case 0xac: 309 case 0xac:
289 device->cname = "MCP79/MCP7A"; 310 device->cname = "MCP79/MCP7A";
290 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 311 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
291 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 312 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
292 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 313 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
293 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; 314 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
294 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 315 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
295 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 316 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
296 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 317 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
297 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 318 device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
298 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 319 device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
299 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 320 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
300 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 321 device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass;
301 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 322 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
302 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 323 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
303 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 324 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
325 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
304 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 326 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
305 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 327 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
306 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 328 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
307 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 329 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
308 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass; 330 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
309 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 331 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
310 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 332 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
311 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 333 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
312 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; 334 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
335 device->oclass[NVDEV_ENGINE_PERFMON] = nv84_perfmon_oclass;
313 break; 336 break;
314 case 0xa3: 337 case 0xa3:
315 device->cname = "GT215"; 338 device->cname = "GT215";
@@ -320,16 +343,18 @@ nv50_identify(struct nouveau_device *device)
320 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 343 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
321 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 344 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
322 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass; 345 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
323 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 346 device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
324 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 347 device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
325 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 348 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
326 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 349 device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
327 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 350 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
328 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 351 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
329 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 352 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
353 device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
354 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
330 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 355 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
331 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 356 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
332 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 357 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
333 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 358 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
334 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; 359 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
335 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass; 360 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
@@ -337,6 +362,7 @@ nv50_identify(struct nouveau_device *device)
337 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 362 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
338 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 363 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
339 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 364 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
365 device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
340 break; 366 break;
341 case 0xa5: 367 case 0xa5:
342 device->cname = "GT216"; 368 device->cname = "GT216";
@@ -347,22 +373,25 @@ nv50_identify(struct nouveau_device *device)
347 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 373 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
348 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 374 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
349 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass; 375 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
350 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 376 device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
351 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 377 device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
352 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 378 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
353 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 379 device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
354 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 380 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
355 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 381 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
356 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 382 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
383 device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
384 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
357 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 385 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
358 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 386 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
359 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 387 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
360 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 388 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
361 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass; 389 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
362 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 390 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
363 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 391 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
364 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 392 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
365 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 393 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
394 device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
366 break; 395 break;
367 case 0xa8: 396 case 0xa8:
368 device->cname = "GT218"; 397 device->cname = "GT218";
@@ -373,22 +402,25 @@ nv50_identify(struct nouveau_device *device)
373 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 402 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
374 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 403 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
375 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass; 404 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
376 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 405 device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
377 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 406 device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
378 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 407 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
379 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 408 device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
380 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 409 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
381 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 410 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
382 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 411 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
412 device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
413 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
383 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 414 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
384 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 415 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
385 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 416 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
386 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 417 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
387 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass; 418 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
388 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 419 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
389 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 420 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
390 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 421 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
391 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 422 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
423 device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
392 break; 424 break;
393 case 0xaf: 425 case 0xaf:
394 device->cname = "MCP89"; 426 device->cname = "MCP89";
@@ -399,22 +431,25 @@ nv50_identify(struct nouveau_device *device)
399 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 431 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
400 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 432 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
401 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass; 433 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
402 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 434 device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
403 device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; 435 device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
404 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 436 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
405 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 437 device->oclass[NVDEV_SUBDEV_FB ] = nvaf_fb_oclass;
406 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 438 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
407 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 439 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
408 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 440 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
441 device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
442 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
409 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 443 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
410 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; 444 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
411 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; 445 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
412 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 446 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
413 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass; 447 device->oclass[NVDEV_ENGINE_VP ] = &nv98_vp_oclass;
414 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass; 448 device->oclass[NVDEV_ENGINE_BSP ] = &nv98_bsp_oclass;
415 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 449 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
416 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 450 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
417 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 451 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
452 device->oclass[NVDEV_ENGINE_PERFMON] = nva3_perfmon_oclass;
418 break; 453 break;
419 default: 454 default:
420 nv_fatal(device, "unknown Tesla chipset\n"); 455 nv_fatal(device, "unknown Tesla chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 418f51f50d7a..606598f226fc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -38,6 +38,8 @@
38#include <subdev/instmem.h> 38#include <subdev/instmem.h>
39#include <subdev/vm.h> 39#include <subdev/vm.h>
40#include <subdev/bar.h> 40#include <subdev/bar.h>
41#include <subdev/pwr.h>
42#include <subdev/volt.h>
41 43
42#include <engine/device.h> 44#include <engine/device.h>
43#include <engine/dmaobj.h> 45#include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
49#include <engine/ppp.h> 51#include <engine/ppp.h>
50#include <engine/copy.h> 52#include <engine/copy.h>
51#include <engine/disp.h> 53#include <engine/disp.h>
54#include <engine/perfmon.h>
52 55
53int 56int
54nvc0_identify(struct nouveau_device *device) 57nvc0_identify(struct nouveau_device *device)
@@ -63,18 +66,20 @@ nvc0_identify(struct nouveau_device *device)
63 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 66 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
64 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 67 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
65 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 68 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
66 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 69 device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
67 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 70 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
68 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 71 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
69 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 72 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
70 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 73 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
71 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 74 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
72 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 75 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
73 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 76 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
74 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 77 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
78 device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
79 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
75 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 80 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
76 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 81 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
77 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 82 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
78 device->oclass[NVDEV_ENGINE_GR ] = nvc0_graph_oclass; 83 device->oclass[NVDEV_ENGINE_GR ] = nvc0_graph_oclass;
79 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; 84 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
80 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 85 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
@@ -82,6 +87,7 @@ nvc0_identify(struct nouveau_device *device)
82 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 87 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
83 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 88 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
84 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 89 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
90 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
85 break; 91 break;
86 case 0xc4: 92 case 0xc4:
87 device->cname = "GF104"; 93 device->cname = "GF104";
@@ -92,18 +98,20 @@ nvc0_identify(struct nouveau_device *device)
92 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 98 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
93 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 99 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
94 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 100 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
95 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 101 device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
96 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 102 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
97 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 103 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
98 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 104 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
99 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 105 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
100 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 106 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
101 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 107 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
102 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 108 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
103 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 109 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
110 device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
111 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
104 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 112 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
105 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 113 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
106 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 114 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
107 device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass; 115 device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass;
108 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; 116 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
109 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 117 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
@@ -111,6 +119,7 @@ nvc0_identify(struct nouveau_device *device)
111 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 119 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
112 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 120 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
113 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 121 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
122 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
114 break; 123 break;
115 case 0xc3: 124 case 0xc3:
116 device->cname = "GF106"; 125 device->cname = "GF106";
@@ -121,24 +130,27 @@ nvc0_identify(struct nouveau_device *device)
121 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 130 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
122 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 131 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
123 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 132 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
124 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 133 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
125 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 134 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
126 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 135 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
127 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 136 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
128 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 137 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
129 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 138 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
130 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 139 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
131 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 140 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
132 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 141 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
142 device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
143 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
133 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 144 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
134 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 145 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
135 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 146 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
136 device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass; 147 device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass;
137 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; 148 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
138 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 149 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
139 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 150 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
140 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 151 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
141 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 152 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
153 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
142 break; 154 break;
143 case 0xce: 155 case 0xce:
144 device->cname = "GF114"; 156 device->cname = "GF114";
@@ -149,18 +161,20 @@ nvc0_identify(struct nouveau_device *device)
149 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 161 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
150 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 162 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
151 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 163 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
152 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 164 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
153 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
154 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
155 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
156 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 168 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
157 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 169 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
158 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 170 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
159 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 171 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
160 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 172 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
173 device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
174 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
161 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 175 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
162 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 176 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
163 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 177 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
164 device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass; 178 device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass;
165 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; 179 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
166 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 180 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
@@ -168,6 +182,7 @@ nvc0_identify(struct nouveau_device *device)
168 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 182 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
169 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 183 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
170 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 184 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
185 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
171 break; 186 break;
172 case 0xcf: 187 case 0xcf:
173 device->cname = "GF116"; 188 device->cname = "GF116";
@@ -178,18 +193,20 @@ nvc0_identify(struct nouveau_device *device)
178 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 193 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
179 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 194 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
180 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 195 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
181 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 196 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
182 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 197 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
183 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 198 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
184 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 199 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
185 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 200 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
186 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 201 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
187 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 202 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
188 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 203 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
189 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 204 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
205 device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
206 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
190 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 207 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
191 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 208 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
192 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 209 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
193 device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass; 210 device->oclass[NVDEV_ENGINE_GR ] = nvc3_graph_oclass;
194 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; 211 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
195 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 212 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
@@ -197,6 +214,7 @@ nvc0_identify(struct nouveau_device *device)
197 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 214 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
198 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 215 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
199 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 216 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
217 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
200 break; 218 break;
201 case 0xc1: 219 case 0xc1:
202 device->cname = "GF108"; 220 device->cname = "GF108";
@@ -207,24 +225,27 @@ nvc0_identify(struct nouveau_device *device)
207 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 225 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
208 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 226 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
209 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 227 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
210 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 228 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
211 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 229 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
212 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 230 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
213 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 231 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
214 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 232 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
215 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 233 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
216 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 234 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
217 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 235 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
218 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 236 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
237 device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
238 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
219 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 239 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
220 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 240 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
221 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 241 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
222 device->oclass[NVDEV_ENGINE_GR ] = nvc1_graph_oclass; 242 device->oclass[NVDEV_ENGINE_GR ] = nvc1_graph_oclass;
223 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; 243 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
224 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 244 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
225 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 245 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
226 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 246 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
227 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 247 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
248 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
228 break; 249 break;
229 case 0xc8: 250 case 0xc8:
230 device->cname = "GF110"; 251 device->cname = "GF110";
@@ -235,18 +256,20 @@ nvc0_identify(struct nouveau_device *device)
235 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 256 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
236 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 257 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
237 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 258 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
238 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 259 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
239 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 260 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
240 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 261 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
241 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 262 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
242 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 263 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
243 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 264 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
244 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 265 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
245 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 266 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
246 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 267 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
268 device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
269 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
247 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 270 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
248 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 271 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
249 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 272 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
250 device->oclass[NVDEV_ENGINE_GR ] = nvc8_graph_oclass; 273 device->oclass[NVDEV_ENGINE_GR ] = nvc8_graph_oclass;
251 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; 274 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
252 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 275 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
@@ -254,6 +277,7 @@ nvc0_identify(struct nouveau_device *device)
254 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 277 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
255 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 278 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
256 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 279 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
280 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
257 break; 281 break;
258 case 0xd9: 282 case 0xd9:
259 device->cname = "GF119"; 283 device->cname = "GF119";
@@ -264,24 +288,27 @@ nvc0_identify(struct nouveau_device *device)
264 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 288 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
265 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 289 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
266 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 290 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
267 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 291 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
268 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 292 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
269 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 293 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
270 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 294 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
271 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 295 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
272 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 296 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
273 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 297 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
274 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 298 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
275 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 299 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
300 device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
301 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
276 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 302 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
277 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 303 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
278 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 304 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
279 device->oclass[NVDEV_ENGINE_GR ] = nvd9_graph_oclass; 305 device->oclass[NVDEV_ENGINE_GR ] = nvd9_graph_oclass;
280 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; 306 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
281 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 307 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
282 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 308 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
283 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 309 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
284 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; 310 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
311 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
285 break; 312 break;
286 case 0xd7: 313 case 0xd7:
287 device->cname = "GF117"; 314 device->cname = "GF117";
@@ -292,24 +319,25 @@ nvc0_identify(struct nouveau_device *device)
292 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 319 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
293 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 320 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
294 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 321 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
295 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 322 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
296 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 323 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
297 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 324 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
298 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 325 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
299 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 326 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
300 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 327 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
301 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 328 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
302 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 329 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
303 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 330 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
304 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 331 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
305 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 332 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
306 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 333 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
307 device->oclass[NVDEV_ENGINE_GR ] = nvd7_graph_oclass; 334 device->oclass[NVDEV_ENGINE_GR ] = nvd7_graph_oclass;
308 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; 335 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
309 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 336 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
310 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 337 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
311 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 338 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
312 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; 339 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
340 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
313 break; 341 break;
314 default: 342 default:
315 nv_fatal(device, "unknown Fermi chipset\n"); 343 nv_fatal(device, "unknown Fermi chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 7aca1877add4..3900104976fc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -38,6 +38,8 @@
38#include <subdev/instmem.h> 38#include <subdev/instmem.h>
39#include <subdev/vm.h> 39#include <subdev/vm.h>
40#include <subdev/bar.h> 40#include <subdev/bar.h>
41#include <subdev/pwr.h>
42#include <subdev/volt.h>
41 43
42#include <engine/device.h> 44#include <engine/device.h>
43#include <engine/dmaobj.h> 45#include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
49#include <engine/bsp.h> 51#include <engine/bsp.h>
50#include <engine/vp.h> 52#include <engine/vp.h>
51#include <engine/ppp.h> 53#include <engine/ppp.h>
54#include <engine/perfmon.h>
52 55
53int 56int
54nve0_identify(struct nouveau_device *device) 57nve0_identify(struct nouveau_device *device)
@@ -59,22 +62,24 @@ nve0_identify(struct nouveau_device *device)
59 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 62 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
60 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass; 63 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
61 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass; 64 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
62 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; 65 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
63 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 66 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
64 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 67 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
65 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 68 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
66 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 69 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
67 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 70 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
68 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 71 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
69 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 72 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
70 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 73 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
71 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 74 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
72 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 75 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
73 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 76 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
74 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 77 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
78 device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
79 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
75 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 80 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
76 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; 81 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
77 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 82 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
78 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass; 83 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
79 device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass; 84 device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
80 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; 85 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
@@ -83,28 +88,31 @@ nve0_identify(struct nouveau_device *device)
83 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; 88 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
84 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; 89 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
85 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 90 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
91 device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
86 break; 92 break;
87 case 0xe7: 93 case 0xe7:
88 device->cname = "GK107"; 94 device->cname = "GK107";
89 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 95 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
90 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass; 96 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
91 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass; 97 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
92 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; 98 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
93 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 99 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
94 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 100 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
95 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 101 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
96 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 102 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
97 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 103 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
98 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 104 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
99 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 105 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
100 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 106 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
101 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 107 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
102 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 108 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
103 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 109 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
104 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 110 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
111 device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
112 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
105 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 113 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
106 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; 114 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
107 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 115 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
108 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass; 116 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
109 device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass; 117 device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
110 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; 118 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
@@ -113,28 +121,31 @@ nve0_identify(struct nouveau_device *device)
113 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; 121 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
114 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; 122 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
115 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 123 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
124 device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
116 break; 125 break;
117 case 0xe6: 126 case 0xe6:
118 device->cname = "GK106"; 127 device->cname = "GK106";
119 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 128 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
120 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass; 129 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
121 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass; 130 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
122 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; 131 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
123 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 132 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
124 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 133 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
125 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 134 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
126 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 135 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
127 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 136 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
128 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 137 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
129 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 138 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
130 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 139 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
131 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 140 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
132 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 141 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
133 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 142 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
134 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 143 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
144 device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
145 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
135 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 146 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
136 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; 147 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
137 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 148 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
138 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass; 149 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
139 device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass; 150 device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
140 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; 151 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
@@ -143,28 +154,31 @@ nve0_identify(struct nouveau_device *device)
143 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; 154 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
144 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; 155 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
145 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 156 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
157 device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
146 break; 158 break;
147 case 0xf0: 159 case 0xf0:
148 device->cname = "GK110"; 160 device->cname = "GK110";
149 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 161 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
150 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass; 162 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
151 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass; 163 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
152 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; 164 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
153 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 165 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
154 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 166 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
155 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 167 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
156 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; 168 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
157 device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; 169 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
158 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 170 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
159 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 171 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
160 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 172 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
161 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 173 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
162 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 174 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
163 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 175 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
164 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 176 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
177 device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
178 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
165 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 179 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
166 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; 180 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
167 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 181 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
168 device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass; 182 device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
169 device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass; 183 device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass;
170 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; 184 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
@@ -175,6 +189,43 @@ nve0_identify(struct nouveau_device *device)
175 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; 189 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
176 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 190 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
177#endif 191#endif
192 device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
193 break;
194 case 0x108:
195 device->cname = "GK208";
196 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
197 device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
198 device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
199 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
200 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
201 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
202 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
203 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
204 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
205 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
206 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
207 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
208 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
209 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
210 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
211 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
212 device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass;
213 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
214 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
215#if 0
216 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
217 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
218 device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
219#endif
220 device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass;
221#if 0
222 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
223 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
224 device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
225 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
226 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
227 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
228#endif
178 break; 229 break;
179 default: 230 default:
180 nv_fatal(device, "unknown Kepler chipset\n"); 231 nv_fatal(device, "unknown Kepler chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/priv.h b/drivers/gpu/drm/nouveau/core/engine/device/priv.h
new file mode 100644
index 000000000000..035fd5b9cfc3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/priv.h
@@ -0,0 +1,8 @@
1#ifndef __NVKM_DEVICE_PRIV_H__
2#define __NVKM_DEVICE_PRIV_H__
3
4#include <engine/device.h>
5
6extern struct nouveau_oclass nouveau_control_oclass[];
7
8#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 054d9cff4f53..1bd4c63369c1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -70,17 +70,10 @@ dp_set_link_config(struct dp_state *dp)
70 }; 70 };
71 u32 lnkcmp; 71 u32 lnkcmp;
72 u8 sink[2]; 72 u8 sink[2];
73 int ret;
73 74
74 DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); 75 DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
75 76
76 /* set desired link configuration on the sink */
77 sink[0] = dp->link_bw / 27000;
78 sink[1] = dp->link_nr;
79 if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
80 sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
81
82 nv_wraux(dp->aux, DPCD_LC00, sink, 2);
83
84 /* set desired link configuration on the source */ 77 /* set desired link configuration on the source */
85 if ((lnkcmp = dp->info.lnkcmp)) { 78 if ((lnkcmp = dp->info.lnkcmp)) {
86 if (dp->version < 0x30) { 79 if (dp->version < 0x30) {
@@ -96,10 +89,22 @@ dp_set_link_config(struct dp_state *dp)
96 nvbios_exec(&init); 89 nvbios_exec(&init);
97 } 90 }
98 91
99 return dp->func->lnk_ctl(dp->disp, dp->outp, dp->head, 92 ret = dp->func->lnk_ctl(dp->disp, dp->outp, dp->head,
100 dp->link_nr, dp->link_bw / 27000, 93 dp->link_nr, dp->link_bw / 27000,
101 dp->dpcd[DPCD_RC02] & 94 dp->dpcd[DPCD_RC02] &
102 DPCD_RC02_ENHANCED_FRAME_CAP); 95 DPCD_RC02_ENHANCED_FRAME_CAP);
96 if (ret) {
97 ERR("lnk_ctl failed with %d\n", ret);
98 return ret;
99 }
100
101 /* set desired link configuration on the sink */
102 sink[0] = dp->link_bw / 27000;
103 sink[1] = dp->link_nr;
104 if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
105 sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
106
107 return nv_wraux(dp->aux, DPCD_LC00, sink, 2);
103} 108}
104 109
105static void 110static void
@@ -294,8 +299,17 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
294 299
295 ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd)); 300 ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd));
296 if (ret) { 301 if (ret) {
302 /* it's possible the display has been unplugged before we
303 * get here. we still need to execute the full set of
304 * vbios scripts, and program the OR at a high enough
305 * frequency to satisfy the target mode. failure to do
306 * so results at best in an UPDATE hanging, and at worst
307 * with PDISP running away to join the circus.
308 */
309 dp->dpcd[1] = link_bw[0] / 27000;
310 dp->dpcd[2] = 4;
311 dp->dpcd[3] = 0x00;
297 ERR("failed to read DPCD\n"); 312 ERR("failed to read DPCD\n");
298 return ret;
299 } 313 }
300 314
301 /* adjust required bandwidth for 8B/10B coding overhead */ 315 /* adjust required bandwidth for 8B/10B coding overhead */
@@ -308,7 +322,7 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
308 while (*link_bw > (dp->dpcd[1] * 27000)) 322 while (*link_bw > (dp->dpcd[1] * 27000))
309 link_bw++; 323 link_bw++;
310 324
311 while (link_bw[0]) { 325 while ((ret = -EIO) && link_bw[0]) {
312 /* find minimum required lane count at this link rate */ 326 /* find minimum required lane count at this link rate */
313 dp->link_nr = dp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT; 327 dp->link_nr = dp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT;
314 while ((dp->link_nr >> 1) * link_bw[0] > datarate) 328 while ((dp->link_nr >> 1) * link_bw[0] > datarate)
@@ -328,8 +342,10 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
328 !dp_link_train_eq(dp)) 342 !dp_link_train_eq(dp))
329 break; 343 break;
330 } else 344 } else
331 if (ret >= 1) { 345 if (ret) {
332 /* dp_set_link_config() handled training */ 346 /* dp_set_link_config() handled training, or
347 * we failed to communicate with the sink.
348 */
333 break; 349 break;
334 } 350 }
335 351
@@ -339,8 +355,10 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
339 355
340 /* finish link training */ 356 /* finish link training */
341 dp_set_training_pattern(dp, 0); 357 dp_set_training_pattern(dp, 0);
358 if (ret < 0)
359 ERR("link training failed\n");
342 360
343 /* execute post-train script from vbios */ 361 /* execute post-train script from vbios */
344 dp_link_train_fini(dp); 362 dp_link_train_fini(dp);
345 return true; 363 return (ret < 0) ? false : true;
346} 364}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index 05e903f08a36..a0bc8a89b699 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -59,6 +59,7 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
59 struct nv04_disp_priv *priv = (void *)subdev; 59 struct nv04_disp_priv *priv = (void *)subdev;
60 u32 crtc0 = nv_rd32(priv, 0x600100); 60 u32 crtc0 = nv_rd32(priv, 0x600100);
61 u32 crtc1 = nv_rd32(priv, 0x602100); 61 u32 crtc1 = nv_rd32(priv, 0x602100);
62 u32 pvideo;
62 63
63 if (crtc0 & 0x00000001) { 64 if (crtc0 & 0x00000001) {
64 nouveau_event_trigger(priv->base.vblank, 0); 65 nouveau_event_trigger(priv->base.vblank, 0);
@@ -69,6 +70,14 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
69 nouveau_event_trigger(priv->base.vblank, 1); 70 nouveau_event_trigger(priv->base.vblank, 1);
70 nv_wr32(priv, 0x602100, 0x00000001); 71 nv_wr32(priv, 0x602100, 0x00000001);
71 } 72 }
73
74 if (nv_device(priv)->chipset >= 0x10 &&
75 nv_device(priv)->chipset <= 0x40) {
76 pvideo = nv_rd32(priv, 0x8100);
77 if (pvideo & ~0x11)
78 nv_info(priv, "PVIDEO intr: %08x\n", pvideo);
79 nv_wr32(priv, 0x8100, pvideo);
80 }
72} 81}
73 82
74static int 83static int
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 52dd7a1db729..378a015091d2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -541,6 +541,15 @@ nvd0_disp_base_init(struct nouveau_object *object)
541 nv_wr32(priv, 0x6100a0, 0x00000000); 541 nv_wr32(priv, 0x6100a0, 0x00000000);
542 nv_wr32(priv, 0x6100b0, 0x00000307); 542 nv_wr32(priv, 0x6100b0, 0x00000307);
543 543
544 /* disable underflow reporting, preventing an intermittent issue
545 * on some nve4 boards where the production vbios left this
546 * setting enabled by default.
547 *
548 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
549 */
550 for (i = 0; i < priv->head.nr; i++)
551 nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
552
544 return 0; 553 return 0;
545} 554}
546 555
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
index 7ec4ee83fb64..eea3ef59693d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -97,8 +97,9 @@ nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
97{ 97{
98 struct nouveau_bios *bios = nouveau_bios(disp); 98 struct nouveau_bios *bios = nouveau_bios(disp);
99 struct nv50_disp_priv *priv = (void *)disp; 99 struct nv50_disp_priv *priv = (void *)disp;
100 const u32 shift = nv94_sor_dp_lane_map(priv, lane);
100 const u32 loff = nv94_sor_loff(outp); 101 const u32 loff = nv94_sor_loff(outp);
101 u32 addr, shift = nv94_sor_dp_lane_map(priv, lane); 102 u32 addr, data[3];
102 u8 ver, hdr, cnt, len; 103 u8 ver, hdr, cnt, len;
103 struct nvbios_dpout info; 104 struct nvbios_dpout info;
104 struct nvbios_dpcfg ocfg; 105 struct nvbios_dpcfg ocfg;
@@ -113,9 +114,12 @@ nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
113 if (!addr) 114 if (!addr)
114 return -EINVAL; 115 return -EINVAL;
115 116
116 nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift); 117 data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
117 nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift); 118 data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
118 nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8); 119 data[2] = nv_rd32(priv, 0x61c130 + loff) & ~(0x0000ff00);
120 nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.drv << shift));
121 nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pre << shift));
122 nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.unk << 8));
119 return 0; 123 return 0;
120} 124}
121 125
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index 9e1d435d7282..d2df572f16a3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -93,8 +93,9 @@ nvd0_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
93{ 93{
94 struct nouveau_bios *bios = nouveau_bios(disp); 94 struct nouveau_bios *bios = nouveau_bios(disp);
95 struct nv50_disp_priv *priv = (void *)disp; 95 struct nv50_disp_priv *priv = (void *)disp;
96 const u32 shift = nvd0_sor_dp_lane_map(priv, lane);
96 const u32 loff = nvd0_sor_loff(outp); 97 const u32 loff = nvd0_sor_loff(outp);
97 u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane); 98 u32 addr, data[3];
98 u8 ver, hdr, cnt, len; 99 u8 ver, hdr, cnt, len;
99 struct nvbios_dpout info; 100 struct nvbios_dpout info;
100 struct nvbios_dpcfg ocfg; 101 struct nvbios_dpcfg ocfg;
@@ -109,9 +110,12 @@ nvd0_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
109 if (!addr) 110 if (!addr)
110 return -EINVAL; 111 return -EINVAL;
111 112
112 nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift); 113 data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
113 nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift); 114 data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
114 nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8); 115 data[2] = nv_rd32(priv, 0x61c130 + loff) & ~(0x0000ff00);
116 nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.drv << shift));
117 nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pre << shift));
118 nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.unk << 8));
115 nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000); 119 nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
116 return 0; 120 return 0;
117} 121}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index f877bd524a92..54f26cc801c7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -632,8 +632,8 @@ nv04_fifo_init(struct nouveau_object *object)
632 return 0; 632 return 0;
633} 633}
634 634
635struct nouveau_oclass 635struct nouveau_oclass *
636nv04_fifo_oclass = { 636nv04_fifo_oclass = &(struct nouveau_oclass) {
637 .handle = NV_ENGINE(FIFO, 0x04), 637 .handle = NV_ENGINE(FIFO, 0x04),
638 .ofuncs = &(struct nouveau_ofuncs) { 638 .ofuncs = &(struct nouveau_ofuncs) {
639 .ctor = nv04_fifo_ctor, 639 .ctor = nv04_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 2c927c1d173b..571a22aa1ae5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -159,8 +159,8 @@ nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
159 return 0; 159 return 0;
160} 160}
161 161
162struct nouveau_oclass 162struct nouveau_oclass *
163nv10_fifo_oclass = { 163nv10_fifo_oclass = &(struct nouveau_oclass) {
164 .handle = NV_ENGINE(FIFO, 0x10), 164 .handle = NV_ENGINE(FIFO, 0x10),
165 .ofuncs = &(struct nouveau_ofuncs) { 165 .ofuncs = &(struct nouveau_ofuncs) {
166 .ctor = nv10_fifo_ctor, 166 .ctor = nv10_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index a9cb51d38c57..f25760209316 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -196,8 +196,8 @@ nv17_fifo_init(struct nouveau_object *object)
196 return 0; 196 return 0;
197} 197}
198 198
199struct nouveau_oclass 199struct nouveau_oclass *
200nv17_fifo_oclass = { 200nv17_fifo_oclass = &(struct nouveau_oclass) {
201 .handle = NV_ENGINE(FIFO, 0x17), 201 .handle = NV_ENGINE(FIFO, 0x17),
202 .ofuncs = &(struct nouveau_ofuncs) { 202 .ofuncs = &(struct nouveau_ofuncs) {
203 .ctor = nv17_fifo_ctor, 203 .ctor = nv17_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 5c7433d5069f..343487ed2238 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -337,8 +337,8 @@ nv40_fifo_init(struct nouveau_object *object)
337 return 0; 337 return 0;
338} 338}
339 339
340struct nouveau_oclass 340struct nouveau_oclass *
341nv40_fifo_oclass = { 341nv40_fifo_oclass = &(struct nouveau_oclass) {
342 .handle = NV_ENGINE(FIFO, 0x40), 342 .handle = NV_ENGINE(FIFO, 0x40),
343 .ofuncs = &(struct nouveau_ofuncs) { 343 .ofuncs = &(struct nouveau_ofuncs) {
344 .ctor = nv40_fifo_ctor, 344 .ctor = nv40_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 7e5dff51d3c5..5f555788121c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -502,8 +502,8 @@ nv50_fifo_init(struct nouveau_object *object)
502 return 0; 502 return 0;
503} 503}
504 504
505struct nouveau_oclass 505struct nouveau_oclass *
506nv50_fifo_oclass = { 506nv50_fifo_oclass = &(struct nouveau_oclass) {
507 .handle = NV_ENGINE(FIFO, 0x50), 507 .handle = NV_ENGINE(FIFO, 0x50),
508 .ofuncs = &(struct nouveau_ofuncs) { 508 .ofuncs = &(struct nouveau_ofuncs) {
509 .ctor = nv50_fifo_ctor, 509 .ctor = nv50_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 91a87cd7195a..0908dc834c84 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -144,7 +144,7 @@ nv84_fifo_object_attach(struct nouveau_object *parent,
144 case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break; 144 case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break;
145 case NVDEV_ENGINE_VP : context |= 0x00400000; break; 145 case NVDEV_ENGINE_VP : context |= 0x00400000; break;
146 case NVDEV_ENGINE_CRYPT : 146 case NVDEV_ENGINE_CRYPT :
147 case NVDEV_ENGINE_UNK1C1: context |= 0x00500000; break; 147 case NVDEV_ENGINE_VIC : context |= 0x00500000; break;
148 case NVDEV_ENGINE_BSP : context |= 0x00600000; break; 148 case NVDEV_ENGINE_BSP : context |= 0x00600000; break;
149 default: 149 default:
150 return -EINVAL; 150 return -EINVAL;
@@ -180,7 +180,7 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
180 (1ULL << NVDEV_ENGINE_BSP) | 180 (1ULL << NVDEV_ENGINE_BSP) |
181 (1ULL << NVDEV_ENGINE_PPP) | 181 (1ULL << NVDEV_ENGINE_PPP) |
182 (1ULL << NVDEV_ENGINE_COPY0) | 182 (1ULL << NVDEV_ENGINE_COPY0) |
183 (1ULL << NVDEV_ENGINE_UNK1C1), &chan); 183 (1ULL << NVDEV_ENGINE_VIC), &chan);
184 *pobject = nv_object(chan); 184 *pobject = nv_object(chan);
185 if (ret) 185 if (ret)
186 return ret; 186 return ret;
@@ -243,7 +243,7 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
243 (1ULL << NVDEV_ENGINE_BSP) | 243 (1ULL << NVDEV_ENGINE_BSP) |
244 (1ULL << NVDEV_ENGINE_PPP) | 244 (1ULL << NVDEV_ENGINE_PPP) |
245 (1ULL << NVDEV_ENGINE_COPY0) | 245 (1ULL << NVDEV_ENGINE_COPY0) |
246 (1ULL << NVDEV_ENGINE_UNK1C1), &chan); 246 (1ULL << NVDEV_ENGINE_VIC), &chan);
247 *pobject = nv_object(chan); 247 *pobject = nv_object(chan);
248 if (ret) 248 if (ret)
249 return ret; 249 return ret;
@@ -435,8 +435,8 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
435 return 0; 435 return 0;
436} 436}
437 437
438struct nouveau_oclass 438struct nouveau_oclass *
439nv84_fifo_oclass = { 439nv84_fifo_oclass = &(struct nouveau_oclass) {
440 .handle = NV_ENGINE(FIFO, 0x84), 440 .handle = NV_ENGINE(FIFO, 0x84),
441 .ofuncs = &(struct nouveau_ofuncs) { 441 .ofuncs = &(struct nouveau_ofuncs) {
442 .ctor = nv84_fifo_ctor, 442 .ctor = nv84_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index ce92f289e751..e21453a94971 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -720,8 +720,8 @@ nvc0_fifo_init(struct nouveau_object *object)
720 return 0; 720 return 0;
721} 721}
722 722
723struct nouveau_oclass 723struct nouveau_oclass *
724nvc0_fifo_oclass = { 724nvc0_fifo_oclass = &(struct nouveau_oclass) {
725 .handle = NV_ENGINE(FIFO, 0xc0), 725 .handle = NV_ENGINE(FIFO, 0xc0),
726 .ofuncs = &(struct nouveau_ofuncs) { 726 .ofuncs = &(struct nouveau_ofuncs) {
727 .ctor = nvc0_fifo_ctor, 727 .ctor = nvc0_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 8e8121abe31b..fcd449e5aba7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -675,8 +675,8 @@ nve0_fifo_init(struct nouveau_object *object)
675 return 0; 675 return 0;
676} 676}
677 677
678struct nouveau_oclass 678struct nouveau_oclass *
679nve0_fifo_oclass = { 679nve0_fifo_oclass = &(struct nouveau_oclass) {
680 .handle = NV_ENGINE(FIFO, 0xe0), 680 .handle = NV_ENGINE(FIFO, 0xe0),
681 .ofuncs = &(struct nouveau_ofuncs) { 681 .ofuncs = &(struct nouveau_ofuncs) {
682 .ctor = nve0_fifo_ctor, 682 .ctor = nve0_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
index e5be3ee7f172..71b4283f7fad 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
@@ -587,6 +587,7 @@ nvc1_grctx_init_unk58xx[] = {
587 { 0x405870, 4, 0x04, 0x00000001 }, 587 { 0x405870, 4, 0x04, 0x00000001 },
588 { 0x405a00, 2, 0x04, 0x00000000 }, 588 { 0x405a00, 2, 0x04, 0x00000000 },
589 { 0x405a18, 1, 0x04, 0x00000000 }, 589 { 0x405a18, 1, 0x04, 0x00000000 },
590 {}
590}; 591};
591 592
592static struct nvc0_graph_init 593static struct nvc0_graph_init
@@ -598,6 +599,7 @@ nvc1_grctx_init_rop[] = {
598 { 0x408904, 1, 0x04, 0x62000001 }, 599 { 0x408904, 1, 0x04, 0x62000001 },
599 { 0x408908, 1, 0x04, 0x00c80929 }, 600 { 0x408908, 1, 0x04, 0x00c80929 },
600 { 0x408980, 1, 0x04, 0x0000011d }, 601 { 0x408980, 1, 0x04, 0x0000011d },
602 {}
601}; 603};
602 604
603static struct nvc0_graph_init 605static struct nvc0_graph_init
@@ -671,6 +673,7 @@ nvc1_grctx_init_gpc_0[] = {
671 { 0x419000, 1, 0x04, 0x00000780 }, 673 { 0x419000, 1, 0x04, 0x00000780 },
672 { 0x419004, 2, 0x04, 0x00000000 }, 674 { 0x419004, 2, 0x04, 0x00000000 },
673 { 0x419014, 1, 0x04, 0x00000004 }, 675 { 0x419014, 1, 0x04, 0x00000004 },
676 {}
674}; 677};
675 678
676static struct nvc0_graph_init 679static struct nvc0_graph_init
@@ -717,6 +720,7 @@ nvc1_grctx_init_tpc[] = {
717 { 0x419e98, 1, 0x04, 0x00000000 }, 720 { 0x419e98, 1, 0x04, 0x00000000 },
718 { 0x419ee0, 1, 0x04, 0x00011110 }, 721 { 0x419ee0, 1, 0x04, 0x00011110 },
719 { 0x419f30, 11, 0x04, 0x00000000 }, 722 { 0x419f30, 11, 0x04, 0x00000000 },
723 {}
720}; 724};
721 725
722void 726void
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
index 438e78410808..c4740d528532 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
@@ -258,6 +258,7 @@ nvd7_grctx_init_hub[] = {
258 nvc0_grctx_init_unk78xx, 258 nvc0_grctx_init_unk78xx,
259 nvc0_grctx_init_unk80xx, 259 nvc0_grctx_init_unk80xx,
260 nvd9_grctx_init_rop, 260 nvd9_grctx_init_rop,
261 NULL
261}; 262};
262 263
263struct nvc0_graph_init * 264struct nvc0_graph_init *
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
index 818a4751df46..a1102cbf2fdc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
@@ -466,6 +466,7 @@ nvd9_grctx_init_hub[] = {
466 nvc0_grctx_init_unk78xx, 466 nvc0_grctx_init_unk78xx,
467 nvc0_grctx_init_unk80xx, 467 nvc0_grctx_init_unk80xx,
468 nvd9_grctx_init_rop, 468 nvd9_grctx_init_rop,
469 NULL
469}; 470};
470 471
471struct nvc0_graph_init * 472struct nvc0_graph_init *
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 23c143aaa556..4532f7e5618c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -945,7 +945,8 @@ nv10_graph_load_context(struct nv10_graph_chan *chan, int chid)
945 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) 945 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
946 nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]); 946 nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]);
947 947
948 if (nv_device(priv)->chipset >= 0x17) { 948 if (nv_device(priv)->card_type >= NV_11 &&
949 nv_device(priv)->chipset >= 0x17) {
949 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) 950 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
950 nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]); 951 nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]);
951 } 952 }
@@ -970,7 +971,8 @@ nv10_graph_unload_context(struct nv10_graph_chan *chan)
970 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) 971 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
971 chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]); 972 chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]);
972 973
973 if (nv_device(priv)->chipset >= 0x17) { 974 if (nv_device(priv)->card_type >= NV_11 &&
975 nv_device(priv)->chipset >= 0x17) {
974 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) 976 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
975 chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]); 977 chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]);
976 } 978 }
@@ -1052,7 +1054,8 @@ nv10_graph_context_ctor(struct nouveau_object *parent,
1052 NV_WRITE_CTX(0x00400e14, 0x00001000); 1054 NV_WRITE_CTX(0x00400e14, 0x00001000);
1053 NV_WRITE_CTX(0x00400e30, 0x00080008); 1055 NV_WRITE_CTX(0x00400e30, 0x00080008);
1054 NV_WRITE_CTX(0x00400e34, 0x00080008); 1056 NV_WRITE_CTX(0x00400e34, 0x00080008);
1055 if (nv_device(priv)->chipset >= 0x17) { 1057 if (nv_device(priv)->card_type >= NV_11 &&
1058 nv_device(priv)->chipset >= 0x17) {
1056 /* is it really needed ??? */ 1059 /* is it really needed ??? */
1057 NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, 1060 NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
1058 nv_rd32(priv, NV10_PGRAPH_DEBUG_4)); 1061 nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
@@ -1231,7 +1234,7 @@ nv10_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1231 nv_engine(priv)->sclass = nv10_graph_sclass; 1234 nv_engine(priv)->sclass = nv10_graph_sclass;
1232 else 1235 else
1233 if (nv_device(priv)->chipset < 0x17 || 1236 if (nv_device(priv)->chipset < 0x17 ||
1234 nv_device(priv)->chipset == 0x1a) 1237 nv_device(priv)->card_type < NV_11)
1235 nv_engine(priv)->sclass = nv15_graph_sclass; 1238 nv_engine(priv)->sclass = nv15_graph_sclass;
1236 else 1239 else
1237 nv_engine(priv)->sclass = nv17_graph_sclass; 1240 nv_engine(priv)->sclass = nv17_graph_sclass;
@@ -1270,7 +1273,8 @@ nv10_graph_init(struct nouveau_object *object)
1270 nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9); 1273 nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
1271 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31)); 1274 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
1272 1275
1273 if (nv_device(priv)->chipset >= 0x17) { 1276 if (nv_device(priv)->card_type >= NV_11 &&
1277 nv_device(priv)->chipset >= 0x17) {
1274 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000); 1278 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
1275 nv_wr32(priv, 0x400a10, 0x03ff3fb6); 1279 nv_wr32(priv, 0x400a10, 0x03ff3fb6);
1276 nv_wr32(priv, 0x400838, 0x002f8684); 1280 nv_wr32(priv, 0x400838, 0x002f8684);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 3f4f35cc3848..434bb4b0fa2e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -1138,7 +1138,7 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1138 if (ret) 1138 if (ret)
1139 return ret; 1139 return ret;
1140 1140
1141 nv_subdev(priv)->unit = 0x18001000; 1141 nv_subdev(priv)->unit = 0x08001000;
1142 nv_subdev(priv)->intr = nvc0_graph_intr; 1142 nv_subdev(priv)->intr = nvc0_graph_intr;
1143 1143
1144 priv->base.units = nvc0_graph_units; 1144 priv->base.units = nvc0_graph_units;
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index c19004301309..7eb6d94c84e2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -34,16 +34,7 @@
34 34
35#include <engine/fifo.h> 35#include <engine/fifo.h>
36#include <engine/mpeg.h> 36#include <engine/mpeg.h>
37#include <engine/graph/nv40.h> 37#include <engine/mpeg/nv31.h>
38
39struct nv31_mpeg_priv {
40 struct nouveau_mpeg base;
41 atomic_t refcount;
42};
43
44struct nv31_mpeg_chan {
45 struct nouveau_object base;
46};
47 38
48/******************************************************************************* 39/*******************************************************************************
49 * MPEG object classes 40 * MPEG object classes
@@ -89,18 +80,18 @@ nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
89 80
90 if (mthd == 0x0190) { 81 if (mthd == 0x0190) {
91 /* DMA_CMD */ 82 /* DMA_CMD */
92 nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000)); 83 nv_mask(priv, 0x00b300, 0x00010000, (dma0 & 0x00030000) ? 0x00010000 : 0);
93 nv_wr32(priv, 0x00b334, base); 84 nv_wr32(priv, 0x00b334, base);
94 nv_wr32(priv, 0x00b324, size); 85 nv_wr32(priv, 0x00b324, size);
95 } else 86 } else
96 if (mthd == 0x01a0) { 87 if (mthd == 0x01a0) {
97 /* DMA_DATA */ 88 /* DMA_DATA */
98 nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2); 89 nv_mask(priv, 0x00b300, 0x00020000, (dma0 & 0x00030000) ? 0x00020000 : 0);
99 nv_wr32(priv, 0x00b360, base); 90 nv_wr32(priv, 0x00b360, base);
100 nv_wr32(priv, 0x00b364, size); 91 nv_wr32(priv, 0x00b364, size);
101 } else { 92 } else {
102 /* DMA_IMAGE, VRAM only */ 93 /* DMA_IMAGE, VRAM only */
103 if (dma0 & 0x000c0000) 94 if (dma0 & 0x00030000)
104 return -EINVAL; 95 return -EINVAL;
105 96
106 nv_wr32(priv, 0x00b370, base); 97 nv_wr32(priv, 0x00b370, base);
@@ -110,7 +101,7 @@ nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
110 return 0; 101 return 0;
111} 102}
112 103
113static struct nouveau_ofuncs 104struct nouveau_ofuncs
114nv31_mpeg_ofuncs = { 105nv31_mpeg_ofuncs = {
115 .ctor = nv31_mpeg_object_ctor, 106 .ctor = nv31_mpeg_object_ctor,
116 .dtor = _nouveau_gpuobj_dtor, 107 .dtor = _nouveau_gpuobj_dtor,
@@ -146,16 +137,23 @@ nv31_mpeg_context_ctor(struct nouveau_object *parent,
146{ 137{
147 struct nv31_mpeg_priv *priv = (void *)engine; 138 struct nv31_mpeg_priv *priv = (void *)engine;
148 struct nv31_mpeg_chan *chan; 139 struct nv31_mpeg_chan *chan;
140 unsigned long flags;
149 int ret; 141 int ret;
150 142
151 if (!atomic_add_unless(&priv->refcount, 1, 1))
152 return -EBUSY;
153
154 ret = nouveau_object_create(parent, engine, oclass, 0, &chan); 143 ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
155 *pobject = nv_object(chan); 144 *pobject = nv_object(chan);
156 if (ret) 145 if (ret)
157 return ret; 146 return ret;
158 147
148 spin_lock_irqsave(&nv_engine(priv)->lock, flags);
149 if (priv->chan) {
150 spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
151 nouveau_object_destroy(&chan->base);
152 *pobject = NULL;
153 return -EBUSY;
154 }
155 priv->chan = chan;
156 spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
159 return 0; 157 return 0;
160} 158}
161 159
@@ -164,11 +162,15 @@ nv31_mpeg_context_dtor(struct nouveau_object *object)
164{ 162{
165 struct nv31_mpeg_priv *priv = (void *)object->engine; 163 struct nv31_mpeg_priv *priv = (void *)object->engine;
166 struct nv31_mpeg_chan *chan = (void *)object; 164 struct nv31_mpeg_chan *chan = (void *)object;
167 atomic_dec(&priv->refcount); 165 unsigned long flags;
166
167 spin_lock_irqsave(&nv_engine(priv)->lock, flags);
168 priv->chan = NULL;
169 spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
168 nouveau_object_destroy(&chan->base); 170 nouveau_object_destroy(&chan->base);
169} 171}
170 172
171static struct nouveau_oclass 173struct nouveau_oclass
172nv31_mpeg_cclass = { 174nv31_mpeg_cclass = {
173 .handle = NV_ENGCTX(MPEG, 0x31), 175 .handle = NV_ENGCTX(MPEG, 0x31),
174 .ofuncs = &(struct nouveau_ofuncs) { 176 .ofuncs = &(struct nouveau_ofuncs) {
@@ -197,21 +199,19 @@ nv31_mpeg_tile_prog(struct nouveau_engine *engine, int i)
197void 199void
198nv31_mpeg_intr(struct nouveau_subdev *subdev) 200nv31_mpeg_intr(struct nouveau_subdev *subdev)
199{ 201{
202 struct nv31_mpeg_priv *priv = (void *)subdev;
200 struct nouveau_fifo *pfifo = nouveau_fifo(subdev); 203 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
201 struct nouveau_engine *engine = nv_engine(subdev);
202 struct nouveau_object *engctx;
203 struct nouveau_handle *handle; 204 struct nouveau_handle *handle;
204 struct nv31_mpeg_priv *priv = (void *)subdev; 205 struct nouveau_object *engctx;
205 u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
206 u32 stat = nv_rd32(priv, 0x00b100); 206 u32 stat = nv_rd32(priv, 0x00b100);
207 u32 type = nv_rd32(priv, 0x00b230); 207 u32 type = nv_rd32(priv, 0x00b230);
208 u32 mthd = nv_rd32(priv, 0x00b234); 208 u32 mthd = nv_rd32(priv, 0x00b234);
209 u32 data = nv_rd32(priv, 0x00b238); 209 u32 data = nv_rd32(priv, 0x00b238);
210 u32 show = stat; 210 u32 show = stat;
211 int chid; 211 unsigned long flags;
212 212
213 engctx = nouveau_engctx_get(engine, inst); 213 spin_lock_irqsave(&nv_engine(priv)->lock, flags);
214 chid = pfifo->chid(pfifo, engctx); 214 engctx = nv_object(priv->chan);
215 215
216 if (stat & 0x01000000) { 216 if (stat & 0x01000000) {
217 /* happens on initial binding of the object */ 217 /* happens on initial binding of the object */
@@ -220,7 +220,7 @@ nv31_mpeg_intr(struct nouveau_subdev *subdev)
220 show &= ~0x01000000; 220 show &= ~0x01000000;
221 } 221 }
222 222
223 if (type == 0x00000010) { 223 if (type == 0x00000010 && engctx) {
224 handle = nouveau_handle_get_class(engctx, 0x3174); 224 handle = nouveau_handle_get_class(engctx, 0x3174);
225 if (handle && !nv_call(handle->object, mthd, data)) 225 if (handle && !nv_call(handle->object, mthd, data))
226 show &= ~0x01000000; 226 show &= ~0x01000000;
@@ -232,13 +232,12 @@ nv31_mpeg_intr(struct nouveau_subdev *subdev)
232 nv_wr32(priv, 0x00b230, 0x00000001); 232 nv_wr32(priv, 0x00b230, 0x00000001);
233 233
234 if (show) { 234 if (show) {
235 nv_error(priv, 235 nv_error(priv, "ch %d [%s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
236 "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n", 236 pfifo->chid(pfifo, engctx),
237 chid, inst << 4, nouveau_client_name(engctx), stat, 237 nouveau_client_name(engctx), stat, type, mthd, data);
238 type, mthd, data);
239 } 238 }
240 239
241 nouveau_engctx_put(engctx); 240 spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
242} 241}
243 242
244static int 243static int
@@ -284,10 +283,7 @@ nv31_mpeg_init(struct nouveau_object *object)
284 /* PMPEG init */ 283 /* PMPEG init */
285 nv_wr32(priv, 0x00b32c, 0x00000000); 284 nv_wr32(priv, 0x00b32c, 0x00000000);
286 nv_wr32(priv, 0x00b314, 0x00000100); 285 nv_wr32(priv, 0x00b314, 0x00000100);
287 if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv)) 286 nv_wr32(priv, 0x00b220, 0x00000031);
288 nv_wr32(priv, 0x00b220, 0x00000044);
289 else
290 nv_wr32(priv, 0x00b220, 0x00000031);
291 nv_wr32(priv, 0x00b300, 0x02001ec1); 287 nv_wr32(priv, 0x00b300, 0x02001ec1);
292 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); 288 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
293 289
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h
new file mode 100644
index 000000000000..d08629d0b6ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h
@@ -0,0 +1,15 @@
1#ifndef __NV31_MPEG_H__
2#define __NV31_MPEG_H__
3
4#include <engine/mpeg.h>
5
6struct nv31_mpeg_chan {
7 struct nouveau_object base;
8};
9
10struct nv31_mpeg_priv {
11 struct nouveau_mpeg base;
12 struct nv31_mpeg_chan *chan;
13};
14
15#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index dd6196072e9c..d4e7ec0ba68c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -31,66 +31,63 @@
31#include <subdev/instmem.h> 31#include <subdev/instmem.h>
32 32
33#include <engine/mpeg.h> 33#include <engine/mpeg.h>
34#include <engine/graph/nv40.h> 34#include <engine/mpeg/nv31.h>
35
36struct nv40_mpeg_priv {
37 struct nouveau_mpeg base;
38};
39
40struct nv40_mpeg_chan {
41 struct nouveau_mpeg_chan base;
42};
43 35
44/******************************************************************************* 36/*******************************************************************************
45 * PMPEG context 37 * MPEG object classes
46 ******************************************************************************/ 38 ******************************************************************************/
47 39
48static int 40static int
49nv40_mpeg_context_ctor(struct nouveau_object *parent, 41nv40_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
50 struct nouveau_object *engine,
51 struct nouveau_oclass *oclass, void *data, u32 size,
52 struct nouveau_object **pobject)
53{ 42{
54 struct nv40_mpeg_chan *chan; 43 struct nouveau_instmem *imem = nouveau_instmem(object);
55 int ret; 44 struct nv31_mpeg_priv *priv = (void *)object->engine;
56 45 u32 inst = *(u32 *)arg << 4;
57 ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL, 46 u32 dma0 = nv_ro32(imem, inst + 0);
58 264 * 4, 16, 47 u32 dma1 = nv_ro32(imem, inst + 4);
59 NVOBJ_FLAG_ZERO_ALLOC, &chan); 48 u32 dma2 = nv_ro32(imem, inst + 8);
60 *pobject = nv_object(chan); 49 u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
61 if (ret) 50 u32 size = dma1 + 1;
62 return ret; 51
52 /* only allow linear DMA objects */
53 if (!(dma0 & 0x00002000))
54 return -EINVAL;
55
56 if (mthd == 0x0190) {
57 /* DMA_CMD */
58 nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
59 nv_wr32(priv, 0x00b334, base);
60 nv_wr32(priv, 0x00b324, size);
61 } else
62 if (mthd == 0x01a0) {
63 /* DMA_DATA */
64 nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
65 nv_wr32(priv, 0x00b360, base);
66 nv_wr32(priv, 0x00b364, size);
67 } else {
68 /* DMA_IMAGE, VRAM only */
69 if (dma0 & 0x00030000)
70 return -EINVAL;
71
72 nv_wr32(priv, 0x00b370, base);
73 nv_wr32(priv, 0x00b374, size);
74 }
63 75
64 nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
65 return 0; 76 return 0;
66} 77}
67 78
68static int 79static struct nouveau_omthds
69nv40_mpeg_context_fini(struct nouveau_object *object, bool suspend) 80nv40_mpeg_omthds[] = {
70{ 81 { 0x0190, 0x0190, nv40_mpeg_mthd_dma },
71 82 { 0x01a0, 0x01a0, nv40_mpeg_mthd_dma },
72 struct nv40_mpeg_priv *priv = (void *)object->engine; 83 { 0x01b0, 0x01b0, nv40_mpeg_mthd_dma },
73 struct nv40_mpeg_chan *chan = (void *)object; 84 {}
74 u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4; 85};
75
76 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
77 if (nv_rd32(priv, 0x00b318) == inst)
78 nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
79 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
80 return 0;
81}
82 86
83static struct nouveau_oclass 87struct nouveau_oclass
84nv40_mpeg_cclass = { 88nv40_mpeg_sclass[] = {
85 .handle = NV_ENGCTX(MPEG, 0x40), 89 { 0x3174, &nv31_mpeg_ofuncs, nv40_mpeg_omthds },
86 .ofuncs = &(struct nouveau_ofuncs) { 90 {}
87 .ctor = nv40_mpeg_context_ctor,
88 .dtor = _nouveau_mpeg_context_dtor,
89 .init = _nouveau_mpeg_context_init,
90 .fini = nv40_mpeg_context_fini,
91 .rd32 = _nouveau_mpeg_context_rd32,
92 .wr32 = _nouveau_mpeg_context_wr32,
93 },
94}; 91};
95 92
96/******************************************************************************* 93/*******************************************************************************
@@ -100,7 +97,7 @@ nv40_mpeg_cclass = {
100static void 97static void
101nv40_mpeg_intr(struct nouveau_subdev *subdev) 98nv40_mpeg_intr(struct nouveau_subdev *subdev)
102{ 99{
103 struct nv40_mpeg_priv *priv = (void *)subdev; 100 struct nv31_mpeg_priv *priv = (void *)subdev;
104 u32 stat; 101 u32 stat;
105 102
106 if ((stat = nv_rd32(priv, 0x00b100))) 103 if ((stat = nv_rd32(priv, 0x00b100)))
@@ -117,7 +114,7 @@ nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
117 struct nouveau_oclass *oclass, void *data, u32 size, 114 struct nouveau_oclass *oclass, void *data, u32 size,
118 struct nouveau_object **pobject) 115 struct nouveau_object **pobject)
119{ 116{
120 struct nv40_mpeg_priv *priv; 117 struct nv31_mpeg_priv *priv;
121 int ret; 118 int ret;
122 119
123 ret = nouveau_mpeg_create(parent, engine, oclass, &priv); 120 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
@@ -127,8 +124,8 @@ nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
127 124
128 nv_subdev(priv)->unit = 0x00000002; 125 nv_subdev(priv)->unit = 0x00000002;
129 nv_subdev(priv)->intr = nv40_mpeg_intr; 126 nv_subdev(priv)->intr = nv40_mpeg_intr;
130 nv_engine(priv)->cclass = &nv40_mpeg_cclass; 127 nv_engine(priv)->cclass = &nv31_mpeg_cclass;
131 nv_engine(priv)->sclass = nv31_mpeg_sclass; 128 nv_engine(priv)->sclass = nv40_mpeg_sclass;
132 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog; 129 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
133 return 0; 130 return 0;
134} 131}
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
new file mode 100644
index 000000000000..3d8c2133e0e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
@@ -0,0 +1,194 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/client.h>
28#include <core/engctx.h>
29#include <core/handle.h>
30
31#include <subdev/fb.h>
32#include <subdev/timer.h>
33#include <subdev/instmem.h>
34
35#include <engine/fifo.h>
36#include <engine/mpeg.h>
37
38struct nv44_mpeg_priv {
39 struct nouveau_mpeg base;
40};
41
42struct nv44_mpeg_chan {
43 struct nouveau_mpeg_chan base;
44};
45
46/*******************************************************************************
47 * PMPEG context
48 ******************************************************************************/
49
50static int
51nv44_mpeg_context_ctor(struct nouveau_object *parent,
52 struct nouveau_object *engine,
53 struct nouveau_oclass *oclass, void *data, u32 size,
54 struct nouveau_object **pobject)
55{
56 struct nv44_mpeg_chan *chan;
57 int ret;
58
59 ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
60 264 * 4, 16,
61 NVOBJ_FLAG_ZERO_ALLOC, &chan);
62 *pobject = nv_object(chan);
63 if (ret)
64 return ret;
65
66 nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
67 return 0;
68}
69
70static int
71nv44_mpeg_context_fini(struct nouveau_object *object, bool suspend)
72{
73
74 struct nv44_mpeg_priv *priv = (void *)object->engine;
75 struct nv44_mpeg_chan *chan = (void *)object;
76 u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
77
78 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
79 if (nv_rd32(priv, 0x00b318) == inst)
80 nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
81 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
82 return 0;
83}
84
85static struct nouveau_oclass
86nv44_mpeg_cclass = {
87 .handle = NV_ENGCTX(MPEG, 0x44),
88 .ofuncs = &(struct nouveau_ofuncs) {
89 .ctor = nv44_mpeg_context_ctor,
90 .dtor = _nouveau_mpeg_context_dtor,
91 .init = _nouveau_mpeg_context_init,
92 .fini = nv44_mpeg_context_fini,
93 .rd32 = _nouveau_mpeg_context_rd32,
94 .wr32 = _nouveau_mpeg_context_wr32,
95 },
96};
97
98/*******************************************************************************
99 * PMPEG engine/subdev functions
100 ******************************************************************************/
101
102static void
103nv44_mpeg_intr(struct nouveau_subdev *subdev)
104{
105 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
106 struct nouveau_engine *engine = nv_engine(subdev);
107 struct nouveau_object *engctx;
108 struct nouveau_handle *handle;
109 struct nv44_mpeg_priv *priv = (void *)subdev;
110 u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
111 u32 stat = nv_rd32(priv, 0x00b100);
112 u32 type = nv_rd32(priv, 0x00b230);
113 u32 mthd = nv_rd32(priv, 0x00b234);
114 u32 data = nv_rd32(priv, 0x00b238);
115 u32 show = stat;
116 int chid;
117
118 engctx = nouveau_engctx_get(engine, inst);
119 chid = pfifo->chid(pfifo, engctx);
120
121 if (stat & 0x01000000) {
122 /* happens on initial binding of the object */
123 if (type == 0x00000020 && mthd == 0x0000) {
124 nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
125 show &= ~0x01000000;
126 }
127
128 if (type == 0x00000010) {
129 handle = nouveau_handle_get_class(engctx, 0x3174);
130 if (handle && !nv_call(handle->object, mthd, data))
131 show &= ~0x01000000;
132 nouveau_handle_put(handle);
133 }
134 }
135
136 nv_wr32(priv, 0x00b100, stat);
137 nv_wr32(priv, 0x00b230, 0x00000001);
138
139 if (show) {
140 nv_error(priv,
141 "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
142 chid, inst << 4, nouveau_client_name(engctx), stat,
143 type, mthd, data);
144 }
145
146 nouveau_engctx_put(engctx);
147}
148
149static void
150nv44_mpeg_me_intr(struct nouveau_subdev *subdev)
151{
152 struct nv44_mpeg_priv *priv = (void *)subdev;
153 u32 stat;
154
155 if ((stat = nv_rd32(priv, 0x00b100)))
156 nv44_mpeg_intr(subdev);
157
158 if ((stat = nv_rd32(priv, 0x00b800))) {
159 nv_error(priv, "PMSRCH 0x%08x\n", stat);
160 nv_wr32(priv, 0x00b800, stat);
161 }
162}
163
164static int
165nv44_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
166 struct nouveau_oclass *oclass, void *data, u32 size,
167 struct nouveau_object **pobject)
168{
169 struct nv44_mpeg_priv *priv;
170 int ret;
171
172 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
173 *pobject = nv_object(priv);
174 if (ret)
175 return ret;
176
177 nv_subdev(priv)->unit = 0x00000002;
178 nv_subdev(priv)->intr = nv44_mpeg_me_intr;
179 nv_engine(priv)->cclass = &nv44_mpeg_cclass;
180 nv_engine(priv)->sclass = nv40_mpeg_sclass;
181 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
182 return 0;
183}
184
185struct nouveau_oclass
186nv44_mpeg_oclass = {
187 .handle = NV_ENGINE(MPEG, 0x44),
188 .ofuncs = &(struct nouveau_ofuncs) {
189 .ctor = nv44_mpeg_ctor,
190 .dtor = _nouveau_mpeg_dtor,
191 .init = nv31_mpeg_init,
192 .fini = _nouveau_mpeg_fini,
193 },
194};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
new file mode 100644
index 000000000000..e9c5e51943ef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
@@ -0,0 +1,449 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/option.h>
26#include <core/class.h>
27
28#include <subdev/clock.h>
29
30#include "priv.h"
31
32#define QUAD_MASK 0x0f
33#define QUAD_FREE 0x01
34
35static struct nouveau_perfsig *
36nouveau_perfsig_find_(struct nouveau_perfdom *dom, const char *name, u32 size)
37{
38 char path[64];
39 int i;
40
41 if (name[0] != '/') {
42 for (i = 0; i < dom->signal_nr; i++) {
43 if ( dom->signal[i].name &&
44 !strncmp(name, dom->signal[i].name, size))
45 return &dom->signal[i];
46 }
47 } else {
48 for (i = 0; i < dom->signal_nr; i++) {
49 snprintf(path, sizeof(path), "/%s/%02x", dom->name, i);
50 if (!strncmp(name, path, size))
51 return &dom->signal[i];
52 }
53 }
54
55 return NULL;
56}
57
58struct nouveau_perfsig *
59nouveau_perfsig_find(struct nouveau_perfmon *ppm, const char *name, u32 size,
60 struct nouveau_perfdom **pdom)
61{
62 struct nouveau_perfdom *dom = *pdom;
63 struct nouveau_perfsig *sig;
64
65 if (dom == NULL) {
66 list_for_each_entry(dom, &ppm->domains, head) {
67 sig = nouveau_perfsig_find_(dom, name, size);
68 if (sig) {
69 *pdom = dom;
70 return sig;
71 }
72 }
73
74 return NULL;
75 }
76
77 return nouveau_perfsig_find_(dom, name, size);
78}
79
80struct nouveau_perfctr *
81nouveau_perfsig_wrap(struct nouveau_perfmon *ppm, const char *name,
82 struct nouveau_perfdom **pdom)
83{
84 struct nouveau_perfsig *sig;
85 struct nouveau_perfctr *ctr;
86
87 sig = nouveau_perfsig_find(ppm, name, strlen(name), pdom);
88 if (!sig)
89 return NULL;
90
91 ctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
92 if (ctr) {
93 ctr->signal[0] = sig;
94 ctr->logic_op = 0xaaaa;
95 }
96
97 return ctr;
98}
99
100/*******************************************************************************
101 * Perfmon object classes
102 ******************************************************************************/
103static int
104nouveau_perfctr_query(struct nouveau_object *object, u32 mthd,
105 void *data, u32 size)
106{
107 struct nouveau_device *device = nv_device(object);
108 struct nouveau_perfmon *ppm = (void *)object->engine;
109 struct nouveau_perfdom *dom = NULL, *chk;
110 struct nv_perfctr_query *args = data;
111 const bool all = nouveau_boolopt(device->cfgopt, "NvPmShowAll", false);
112 const bool raw = nouveau_boolopt(device->cfgopt, "NvPmUnnamed", all);
113 const char *name;
114 int tmp = 0, di, si;
115 char path[64];
116
117 if (size < sizeof(*args))
118 return -EINVAL;
119
120 di = (args->iter & 0xff000000) >> 24;
121 si = (args->iter & 0x00ffffff) - 1;
122
123 list_for_each_entry(chk, &ppm->domains, head) {
124 if (tmp++ == di) {
125 dom = chk;
126 break;
127 }
128 }
129
130 if (dom == NULL || si >= (int)dom->signal_nr)
131 return -EINVAL;
132
133 if (si >= 0) {
134 if (raw || !(name = dom->signal[si].name)) {
135 snprintf(path, sizeof(path), "/%s/%02x", dom->name, si);
136 name = path;
137 }
138
139 if (args->name)
140 strncpy(args->name, name, args->size);
141 args->size = strlen(name) + 1;
142 }
143
144 do {
145 while (++si < dom->signal_nr) {
146 if (all || dom->signal[si].name) {
147 args->iter = (di << 24) | ++si;
148 return 0;
149 }
150 }
151 si = -1;
152 di = di + 1;
153 dom = list_entry(dom->head.next, typeof(*dom), head);
154 } while (&dom->head != &ppm->domains);
155
156 args->iter = 0xffffffff;
157 return 0;
158}
159
/* NV_PERFCTR_SAMPLE method: for every domain, latch the batch of
 * counters programmed last time, then program the next batch.
 *
 * Each domain has four hardware slots (QUAD_MASK).  Counters that were
 * sampled are rotated to the tail of the domain list so that a later
 * sample picks up the counters that did not fit this round.
 */
static int
nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd,
		       void *data, u32 size)
{
	struct nouveau_perfmon *ppm = (void *)object->engine;
	struct nouveau_perfctr *ctr, *tmp;
	struct nouveau_perfdom *dom;
	struct nv_perfctr_sample *args = data;

	if (size < sizeof(*args))
		return -EINVAL;
	ppm->sequence++;	/* new global sample generation */

	list_for_each_entry(dom, &ppm->domains, head) {
		/* sample previous batch of counters */
		if (dom->quad != QUAD_MASK) {
			dom->func->next(ppm, dom);
			tmp = NULL;
			while (!list_empty(&dom->list)) {
				ctr = list_first_entry(&dom->list,
							typeof(*ctr), head);
				if (ctr->slot < 0) break;	/* not programmed */
				if ( tmp && tmp == ctr) break;	/* wrapped around */
				if (!tmp) tmp = ctr;		/* remember start */
				dom->func->read(ppm, dom, ctr);
				ctr->slot = -1;
				list_move_tail(&ctr->head, &dom->list);
			}
		}

		dom->quad = QUAD_MASK;	/* all four slots free again */

		/* setup next batch of counters for sampling */
		list_for_each_entry(ctr, &dom->list, head) {
			ctr->slot = ffs(dom->quad) - 1;	/* -1 when none free */
			if (ctr->slot < 0)
				break;
			dom->quad &= ~(QUAD_FREE << ctr->slot);
			dom->func->init(ppm, dom, ctr);
		}

		if (dom->quad != QUAD_MASK)
			dom->func->next(ppm, dom);
	}

	return 0;
}
207
208static int
209nouveau_perfctr_read(struct nouveau_object *object, u32 mthd,
210 void *data, u32 size)
211{
212 struct nouveau_perfctr *ctr = (void *)object;
213 struct nv_perfctr_read *args = data;
214
215 if (size < sizeof(*args))
216 return -EINVAL;
217 if (!ctr->clk)
218 return -EAGAIN;
219
220 args->clk = ctr->clk;
221 args->ctr = ctr->ctr;
222 return 0;
223}
224
225static void
226nouveau_perfctr_dtor(struct nouveau_object *object)
227{
228 struct nouveau_perfctr *ctr = (void *)object;
229 if (ctr->head.next)
230 list_del(&ctr->head);
231 nouveau_object_destroy(&ctr->base);
232}
233
/* NV_PERFCTR_CLASS constructor: resolve up to four named signals, then
 * create the counter and queue it on the owning domain's list for
 * sampling.  Because the same &dom cursor is passed to every lookup,
 * the first match pins the domain and all signals must come from it.
 */
static int
nouveau_perfctr_ctor(struct nouveau_object *parent,
		     struct nouveau_object *engine,
		     struct nouveau_oclass *oclass, void *data, u32 size,
		     struct nouveau_object **pobject)
{
	struct nouveau_perfmon *ppm = (void *)engine;
	struct nouveau_perfdom *dom = NULL;
	struct nouveau_perfsig *sig[4] = {};
	struct nouveau_perfctr *ctr;
	struct nv_perfctr_class *args = data;
	int ret, i;

	if (size < sizeof(*args))
		return -EINVAL;

	/* resolve requested signal names; any unknown name fails the ctor */
	for (i = 0; i < ARRAY_SIZE(args->signal) && args->signal[i].name; i++) {
		sig[i] = nouveau_perfsig_find(ppm, args->signal[i].name,
					      args->signal[i].size, &dom);
		if (!sig[i])
			return -EINVAL;
	}

	ret = nouveau_object_create(parent, engine, oclass, 0, &ctr);
	*pobject = nv_object(ctr);
	if (ret)
		return ret;

	ctr->slot = -1;	/* not yet assigned a hardware slot */
	ctr->logic_op = args->logic_op;
	ctr->signal[0] = sig[0];
	ctr->signal[1] = sig[1];
	ctr->signal[2] = sig[2];
	ctr->signal[3] = sig[3];
	if (dom)
		list_add_tail(&ctr->head, &dom->list);
	return 0;
}
272
/* Object functions for the counter class. */
static struct nouveau_ofuncs
nouveau_perfctr_ofuncs = {
	.ctor = nouveau_perfctr_ctor,
	.dtor = nouveau_perfctr_dtor,
	.init = nouveau_object_init,
	.fini = nouveau_object_fini,
};

/* Methods exposed on counter objects. */
static struct nouveau_omthds
nouveau_perfctr_omthds[] = {
	{ NV_PERFCTR_QUERY, NV_PERFCTR_QUERY, nouveau_perfctr_query },
	{ NV_PERFCTR_SAMPLE, NV_PERFCTR_SAMPLE, nouveau_perfctr_sample },
	{ NV_PERFCTR_READ, NV_PERFCTR_READ, nouveau_perfctr_read },
	{}
};

/* Classes instantiable under the perfmon engine. */
struct nouveau_oclass
nouveau_perfmon_sclass[] = {
	{ .handle = NV_PERFCTR_CLASS,
	  .ofuncs = &nouveau_perfctr_ofuncs,
	  .omthds = nouveau_perfctr_omthds,
	},
	{},
};
297
298/*******************************************************************************
299 * PPM context
300 ******************************************************************************/
301static void
302nouveau_perfctx_dtor(struct nouveau_object *object)
303{
304 struct nouveau_perfmon *ppm = (void *)object->engine;
305 mutex_lock(&nv_subdev(ppm)->mutex);
306 ppm->context = NULL;
307 mutex_unlock(&nv_subdev(ppm)->mutex);
308}
309
/* Context constructor: only one client may own the perfmon engine at a
 * time.  The claim is made under the subdev mutex; losers of the race
 * see ppm->context already set and fail with -EBUSY (the half-built
 * engctx is then torn down by the caller via the dtor).
 */
static int
nouveau_perfctx_ctor(struct nouveau_object *parent,
		     struct nouveau_object *engine,
		     struct nouveau_oclass *oclass, void *data, u32 size,
		     struct nouveau_object **pobject)
{
	struct nouveau_perfmon *ppm = (void *)engine;
	struct nouveau_perfctx *ctx;
	int ret;

	ret = nouveau_engctx_create(parent, engine, oclass, NULL,
				    0, 0, 0, &ctx);
	*pobject = nv_object(ctx);
	if (ret)
		return ret;

	/* attempt to claim exclusive ownership */
	mutex_lock(&nv_subdev(ppm)->mutex);
	if (ppm->context == NULL)
		ppm->context = ctx;
	mutex_unlock(&nv_subdev(ppm)->mutex);

	if (ctx != ppm->context)
		return -EBUSY;	/* someone else owns the engine */

	return 0;
}
336
/* Engine-context class: enforces single-owner access to perfmon. */
struct nouveau_oclass
nouveau_perfmon_cclass = {
	.handle = NV_ENGCTX(PERFMON, 0x00),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nouveau_perfctx_ctor,
		.dtor = nouveau_perfctx_dtor,
		.init = _nouveau_engctx_init,
		.fini = _nouveau_engctx_fini,
	},
};
347
348/*******************************************************************************
349 * PPM engine/subdev functions
350 ******************************************************************************/
/* Register the signal domains described by @spec with the perfmon
 * engine.  When @mask is non-zero, one copy of the spec is instantiated
 * per set bit (unit index i), each offset by i * @size_unit; otherwise
 * a single copy is created.  Within a unit, consecutive specdoms are
 * spaced @size_domain apart starting at @base.
 *
 * NOTE(review): with mask bit 31 set, `1 << i` at i == 31 shifts into
 * the sign bit of int — works on current targets but technically UB;
 * confirm callers never pass bit 31.
 */
int
nouveau_perfdom_new(struct nouveau_perfmon *ppm, const char *name, u32 mask,
		    u32 base, u32 size_unit, u32 size_domain,
		    const struct nouveau_specdom *spec)
{
	const struct nouveau_specdom *sdom;
	const struct nouveau_specsig *ssig;
	struct nouveau_perfdom *dom;
	int i;

	/* i == 0 always runs once, even for mask == 0 (single unit) */
	for (i = 0; i == 0 || mask; i++) {
		u32 addr = base + (i * size_unit);
		if (i && !(mask & (1 << i)))
			continue;

		sdom = spec;
		while (sdom->signal_nr) {
			/* perfdom is allocated with a trailing signal array */
			dom = kzalloc(sizeof(*dom) + sdom->signal_nr *
				      sizeof(*dom->signal), GFP_KERNEL);
			if (!dom)
				return -ENOMEM;

			/* names: "<name>/<unit>/<domain>" or "<name>/<domain>" */
			if (mask) {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x/%02x", name, i,
					 (int)(sdom - spec));
			} else {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x", name, (int)(sdom - spec));
			}

			list_add_tail(&dom->head, &ppm->domains);
			INIT_LIST_HEAD(&dom->list);
			dom->func = sdom->func;
			dom->addr = addr;
			dom->quad = QUAD_MASK;	/* all slots initially free */
			dom->signal_nr = sdom->signal_nr;

			/* copy the named signals into their slots */
			ssig = (sdom++)->signal;
			while (ssig->name) {
				dom->signal[ssig->signal].name = ssig->name;
				ssig++;
			}

			addr += size_domain;
		}

		mask &= ~(1 << i);	/* guarantees loop termination */
	}

	return 0;
}
403
404int
405_nouveau_perfmon_fini(struct nouveau_object *object, bool suspend)
406{
407 struct nouveau_perfmon *ppm = (void *)object;
408 return nouveau_engine_fini(&ppm->base, suspend);
409}
410
411int
412_nouveau_perfmon_init(struct nouveau_object *object)
413{
414 struct nouveau_perfmon *ppm = (void *)object;
415 return nouveau_engine_init(&ppm->base);
416}
417
418void
419_nouveau_perfmon_dtor(struct nouveau_object *object)
420{
421 struct nouveau_perfmon *ppm = (void *)object;
422 struct nouveau_perfdom *dom, *tmp;
423
424 list_for_each_entry_safe(dom, tmp, &ppm->domains, head) {
425 list_del(&dom->head);
426 kfree(dom);
427 }
428
429 nouveau_engine_destroy(&ppm->base);
430}
431
/* Base constructor used by the chipset-specific perfmon ctors: create
 * the engine ("PPM"/"perfmon", always enabled) and initialise the
 * (initially empty) domain list.
 */
int
nouveau_perfmon_create_(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass,
			int length, void **pobject)
{
	struct nouveau_perfmon *ppm;
	int ret;

	ret = nouveau_engine_create_(parent, engine, oclass, true, "PPM",
				     "perfmon", length, pobject);
	ppm = *pobject;
	if (ret)
		return ret;

	INIT_LIST_HEAD(&ppm->domains);
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c
new file mode 100644
index 000000000000..50696cc7b7d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c
@@ -0,0 +1,109 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "priv.h"
26
/* Program one PDAEMON counter slot: build a bitmask of the selected
 * signals within the domain and write the slot's source-select and
 * control registers.  The last slot is always (re)programmed with
 * ctrl 0x3 — pwr_perfctr_read() uses its count as the clock reference.
 */
static void
pwr_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
		 struct nouveau_perfctr *ctr)
{
	u32 mask = 0x00000000;
	u32 ctrl = 0x00000001;
	int i;

	/* signal index within the domain == bit position in the mask */
	for (i = 0; i < ARRAY_SIZE(ctr->signal) && ctr->signal[i]; i++)
		mask |= 1 << (ctr->signal[i] - dom->signal);

	nv_wr32(ppm, 0x10a504 + (ctr->slot * 0x10), mask);
	nv_wr32(ppm, 0x10a50c + (ctr->slot * 0x10), ctrl);
	nv_wr32(ppm, 0x10a50c + (ppm->last * 0x10), 0x00000003);
}
42
43static void
44pwr_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
45 struct nouveau_perfctr *ctr)
46{
47 ctr->ctr = ppm->pwr[ctr->slot];
48 ctr->clk = ppm->pwr[ppm->last];
49}
50
51static void
52pwr_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
53{
54 int i;
55
56 for (i = 0; i <= ppm->last; i++) {
57 ppm->pwr[i] = nv_rd32(ppm, 0x10a508 + (i * 0x10));
58 nv_wr32(ppm, 0x10a508 + (i * 0x10), 0x80000000);
59 }
60}
61
/* Domain backend for the PDAEMON ("pwr") counters. */
static const struct nouveau_funcdom
pwr_perfctr_func = {
	.init = pwr_perfctr_init,
	.read = pwr_perfctr_read,
	.next = pwr_perfctr_next,
};
68
/* NVA3 PDAEMON signal domain: engine-idle signals, one CE. */
const struct nouveau_specdom
nva3_perfmon_pwr[] = {
	{ 0x20, (const struct nouveau_specsig[]) {
		{ 0x00, "pwr_gr_idle" },
		{ 0x04, "pwr_bsp_idle" },
		{ 0x05, "pwr_vp_idle" },
		{ 0x06, "pwr_ppp_idle" },
		{ 0x13, "pwr_ce0_idle" },
		{}
	}, &pwr_perfctr_func },
	{}
};
81
/* NVC0 PDAEMON signal domain: as NVA3 plus a second copy engine. */
const struct nouveau_specdom
nvc0_perfmon_pwr[] = {
	{ 0x20, (const struct nouveau_specsig[]) {
		{ 0x00, "pwr_gr_idle" },
		{ 0x04, "pwr_bsp_idle" },
		{ 0x05, "pwr_vp_idle" },
		{ 0x06, "pwr_ppp_idle" },
		{ 0x13, "pwr_ce0_idle" },
		{ 0x14, "pwr_ce1_idle" },
		{}
	}, &pwr_perfctr_func },
	{}
};
95
/* NVE0 PDAEMON signal domain: as NVC0 plus a third copy engine. */
const struct nouveau_specdom
nve0_perfmon_pwr[] = {
	{ 0x20, (const struct nouveau_specsig[]) {
		{ 0x00, "pwr_gr_idle" },
		{ 0x04, "pwr_bsp_idle" },
		{ 0x05, "pwr_vp_idle" },
		{ 0x06, "pwr_ppp_idle" },
		{ 0x13, "pwr_ce0_idle" },
		{ 0x14, "pwr_ce1_idle" },
		{ 0x15, "pwr_ce2_idle" },
		{}
	}, &pwr_perfctr_func },
	{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c
new file mode 100644
index 000000000000..b2a10785adb1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c
@@ -0,0 +1,143 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv40.h"
26
27/*******************************************************************************
28 * Perfmon object classes
29 ******************************************************************************/
30
31/*******************************************************************************
32 * PPM context
33 ******************************************************************************/
34
35/*******************************************************************************
36 * PPM engine/subdev functions
37 ******************************************************************************/
38
/* Program one NV40 counter slot: pack up to four signal indices (one
 * byte each) into the source-select register and write the logic op
 * that combines them.  Register 0x00a7c0 arms the domain.
 */
static void
nv40_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
		  struct nouveau_perfctr *ctr)
{
	struct nv40_perfmon_priv *priv = (void *)ppm;
	struct nv40_perfmon_cntr *cntr = (void *)ctr;
	u32 log = ctr->logic_op;
	u32 src = 0x00000000;
	int i;

	/* signal index within the domain, one byte per source */
	for (i = 0; i < 4 && ctr->signal[i]; i++)
		src |= (ctr->signal[i] - dom->signal) << (i * 8);

	nv_wr32(priv, 0x00a7c0 + dom->addr, 0x00000001);
	nv_wr32(priv, 0x00a400 + dom->addr + (cntr->base.slot * 0x40), src);
	nv_wr32(priv, 0x00a420 + dom->addr + (cntr->base.slot * 0x40), log);
}
56
/* Read back a sampled NV40 counter.  Each of the four slots has its own
 * count register (note the non-contiguous layout); 0x00a600 holds the
 * domain's clock/cycle count used as the reference.
 */
static void
nv40_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
		  struct nouveau_perfctr *ctr)
{
	struct nv40_perfmon_priv *priv = (void *)ppm;
	struct nv40_perfmon_cntr *cntr = (void *)ctr;

	switch (cntr->base.slot) {
	case 0: cntr->base.ctr = nv_rd32(priv, 0x00a700 + dom->addr); break;
	case 1: cntr->base.ctr = nv_rd32(priv, 0x00a6c0 + dom->addr); break;
	case 2: cntr->base.ctr = nv_rd32(priv, 0x00a680 + dom->addr); break;
	case 3: cntr->base.ctr = nv_rd32(priv, 0x00a740 + dom->addr); break;
	}
	cntr->base.clk = nv_rd32(priv, 0x00a600 + dom->addr);
}
72
73static void
74nv40_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
75{
76 struct nv40_perfmon_priv *priv = (void *)ppm;
77 if (priv->sequence != ppm->sequence) {
78 nv_wr32(priv, 0x400084, 0x00000020);
79 priv->sequence = ppm->sequence;
80 }
81}
82
/* Domain backend shared by NV40 through NVA3 counter domains. */
const struct nouveau_funcdom
nv40_perfctr_func = {
	.init = nv40_perfctr_init,
	.read = nv40_perfctr_read,
	.next = nv40_perfctr_next,
};
89
/* NV40 domain layout: five 0x20-signal domains; no signals named yet,
 * so they are only reachable via raw "/pm/xx" paths.
 */
static const struct nouveau_specdom
nv40_perfmon[] = {
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{}
};
109
/* Shared constructor for NV40-style perfmon engines: create the base
 * engine, register the "pm" domains taken from the chipset-specific
 * oclass wrapper (domain stride 4), and install the common classes.
 */
int
nv40_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nv40_perfmon_oclass *mclass = (void *)oclass;
	struct nv40_perfmon_priv *priv;
	int ret;

	ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* domains freed by _nouveau_perfmon_dtor on failure paths */
	ret = nouveau_perfdom_new(&priv->base, "pm", 0, 0, 0, 4, mclass->doms);
	if (ret)
		return ret;

	nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
	nv_engine(priv)->sclass = nouveau_perfmon_sclass;
	return 0;
}
132
/* NV40 perfmon engine class; .doms carries the chipset domain spec. */
struct nouveau_oclass *
nv40_perfmon_oclass = &(struct nv40_perfmon_oclass) {
	.base.handle = NV_ENGINE(PERFMON, 0x40),
	.base.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_perfmon_ctor,
		.dtor = _nouveau_perfmon_dtor,
		.init = _nouveau_perfmon_init,
		.fini = _nouveau_perfmon_fini,
	},
	.doms = nv40_perfmon,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h
new file mode 100644
index 000000000000..1b5792d1df14
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h
@@ -0,0 +1,26 @@
#ifndef __NVKM_PM_NV40_H__
#define __NVKM_PM_NV40_H__

#include "priv.h"

/* oclass wrapper carrying the chipset-specific domain spec consumed
 * by nv40_perfmon_ctor() */
struct nv40_perfmon_oclass {
	struct nouveau_oclass base;
	const struct nouveau_specdom *doms;
};

struct nv40_perfmon_priv {
	struct nouveau_perfmon base;
	u32 sequence;	/* last global sample sequence triggered */
};

int nv40_perfmon_ctor(struct nouveau_object *, struct nouveau_object *,
		      struct nouveau_oclass *, void *data, u32 size,
		      struct nouveau_object **pobject);

/* per-counter state; currently no NV40-specific fields */
struct nv40_perfmon_cntr {
	struct nouveau_perfctr base;
};

extern const struct nouveau_funcdom nv40_perfctr_func;

#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c
new file mode 100644
index 000000000000..94217691fe67
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c
@@ -0,0 +1,70 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv40.h"
26
27/*******************************************************************************
28 * Perfmon object classes
29 ******************************************************************************/
30
31/*******************************************************************************
32 * PPM context
33 ******************************************************************************/
34
35/*******************************************************************************
36 * PPM engine/subdev functions
37 ******************************************************************************/
38
/* NV50 domain layout: five domains of differing sizes; only the
 * "gr_idle" signal in the second domain is named so far.
 */
static const struct nouveau_specdom
nv50_perfmon[] = {
	{ 0x040, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x100, (const struct nouveau_specsig[]) {
		{ 0xc8, "gr_idle" },
		{}
	}, &nv40_perfctr_func },
	{ 0x100, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x020, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x040, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{}
};
59
/* NV50 perfmon engine: NV40 machinery with the NV50 domain spec. */
struct nouveau_oclass *
nv50_perfmon_oclass = &(struct nv40_perfmon_oclass) {
	.base.handle = NV_ENGINE(PERFMON, 0x50),
	.base.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_perfmon_ctor,
		.dtor = _nouveau_perfmon_dtor,
		.init = _nouveau_perfmon_init,
		.fini = _nouveau_perfmon_fini,
	},
	.doms = nv50_perfmon,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c
new file mode 100644
index 000000000000..9232c7fc6253
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c
@@ -0,0 +1,78 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv40.h"
26
27/*******************************************************************************
28 * Perfmon object classes
29 ******************************************************************************/
30
31/*******************************************************************************
32 * PPM context
33 ******************************************************************************/
34
35/*******************************************************************************
36 * PPM engine/subdev functions
37 ******************************************************************************/
38
/* NV84 domain layout: eight 0x20-signal domains, none named yet. */
static const struct nouveau_specdom
nv84_perfmon[] = {
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{}
};
67
/* NV84 perfmon engine: NV40 machinery with the NV84 domain spec. */
struct nouveau_oclass *
nv84_perfmon_oclass = &(struct nv40_perfmon_oclass) {
	.base.handle = NV_ENGINE(PERFMON, 0x84),
	.base.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_perfmon_ctor,
		.dtor = _nouveau_perfmon_dtor,
		.init = _nouveau_perfmon_init,
		.fini = _nouveau_perfmon_fini,
	},
	.doms = nv84_perfmon,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c
new file mode 100644
index 000000000000..6197ebdeb648
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv40.h"
26
27/*******************************************************************************
28 * Perfmon object classes
29 ******************************************************************************/
30
31/*******************************************************************************
32 * PPM context
33 ******************************************************************************/
34
35/*******************************************************************************
36 * PPM engine/subdev functions
37 ******************************************************************************/
38
/* NVA3 domain layout: eight 0x20-signal domains, none named yet. */
static const struct nouveau_specdom
nva3_perfmon[] = {
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nouveau_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{}
};
67
/* NVA3 constructor: extend the NV40 setup with the PDAEMON "pwr"
 * domain.  last = 3 selects the slot that pwr_perfctr_read() uses as
 * the clock reference.
 */
static int
nva3_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **object)
{
	int ret = nv40_perfmon_ctor(parent, engine, oclass, data, size, object);
	if (ret == 0) {
		struct nv40_perfmon_priv *priv = (void *)*object;
		ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
					  nva3_perfmon_pwr);
		if (ret)
			return ret;

		priv->base.last = 3;
	}
	return ret;
}
85
/* NVA3 perfmon engine: NV40 machinery plus the PDAEMON domain. */
struct nouveau_oclass *
nva3_perfmon_oclass = &(struct nv40_perfmon_oclass) {
	.base.handle = NV_ENGINE(PERFMON, 0xa3),
	.base.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nva3_perfmon_ctor,
		.dtor = _nouveau_perfmon_dtor,
		.init = _nouveau_perfmon_init,
		.fini = _nouveau_perfmon_fini,
	},
	.doms = nva3_perfmon,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c
new file mode 100644
index 000000000000..74b241042502
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c
@@ -0,0 +1,173 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26
27/*******************************************************************************
28 * Perfmon object classes
29 ******************************************************************************/
30
31/*******************************************************************************
32 * PPM context
33 ******************************************************************************/
34
35/*******************************************************************************
36 * PPM engine/subdev functions
37 ******************************************************************************/
38
/* nvc0 counter domains.  All three (HUB, per-GPC, per-PART) are
 * currently empty placeholders: the domains are still registered by
 * the constructor so the MMIO layout is claimed, but no signals have
 * been catalogued for this chipset yet. */
static const struct nouveau_specdom
nvc0_perfmon_hub[] = {
	{}
};

static const struct nouveau_specdom
nvc0_perfmon_gpc[] = {
	{}
};

static const struct nouveau_specdom
nvc0_perfmon_part[] = {
	{}
};
53
54static void
55nvc0_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
56 struct nouveau_perfctr *ctr)
57{
58 struct nvc0_perfmon_priv *priv = (void *)ppm;
59 struct nvc0_perfmon_cntr *cntr = (void *)ctr;
60 u32 log = ctr->logic_op;
61 u32 src = 0x00000000;
62 int i;
63
64 for (i = 0; i < 4 && ctr->signal[i]; i++)
65 src |= (ctr->signal[i] - dom->signal) << (i * 8);
66
67 nv_wr32(priv, dom->addr + 0x09c, 0x00040002);
68 nv_wr32(priv, dom->addr + 0x100, 0x00000000);
69 nv_wr32(priv, dom->addr + 0x040 + (cntr->base.slot * 0x08), src);
70 nv_wr32(priv, dom->addr + 0x044 + (cntr->base.slot * 0x08), log);
71}
72
73static void
74nvc0_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
75 struct nouveau_perfctr *ctr)
76{
77 struct nvc0_perfmon_priv *priv = (void *)ppm;
78 struct nvc0_perfmon_cntr *cntr = (void *)ctr;
79
80 switch (cntr->base.slot) {
81 case 0: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x08c); break;
82 case 1: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x088); break;
83 case 2: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x080); break;
84 case 3: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x090); break;
85 }
86 cntr->base.clk = nv_rd32(priv, dom->addr + 0x070);
87}
88
/* Advance the domain to the next sample period. */
static void
nvc0_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
{
	struct nvc0_perfmon_priv *priv = (void *)ppm;
	/* NOTE(review): magic values — presumably latches the current
	 * counters and restarts the sampling window; the 0x06c value is
	 * derived from the domain's signal count.  Confirm against
	 * PCOUNTER hardware documentation. */
	nv_wr32(priv, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
	nv_wr32(priv, dom->addr + 0x0ec, 0x00000011);
}
96
/* Counter ops shared by all nvc0-style domains (also used by nve0). */
const struct nouveau_funcdom
nvc0_perfctr_func = {
	.init = nvc0_perfctr_init,
	.read = nvc0_perfctr_read,
	.next = nvc0_perfctr_next,
};
103
/* Engine fini: pulse bit 28 of the master engine-enable register
 * (clear then set) to reset the counter unit before the generic
 * perfmon teardown runs.  Shared with nve0/nvf0. */
int
nvc0_perfmon_fini(struct nouveau_object *object, bool suspend)
{
	struct nvc0_perfmon_priv *priv = (void *)object;
	nv_mask(priv, 0x000200, 0x10000000, 0x00000000);
	nv_mask(priv, 0x000200, 0x10000000, 0x10000000);
	return nouveau_perfmon_fini(&priv->base, suspend);
}
112
/*
 * Construct the nvc0 PERFMON engine: register the PDAEMON, HUB,
 * per-GPC and per-PART counter domains, then install the generic
 * perfmon context/object classes.
 */
static int
nvc0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nvc0_perfmon_priv *priv;
	u32 mask;
	int ret;

	ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* PDAEMON */
	ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
				  nvc0_perfmon_pwr);
	if (ret)
		return ret;

	/* HUB */
	ret = nouveau_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
				  nvc0_perfmon_hub);
	if (ret)
		return ret;

	/* GPC: one domain instance per unit present in the mask.
	 * NOTE(review): 0x022430 presumably holds the GPC count and
	 * 0x022504/0x022584 mask off disabled (floorswept) units —
	 * confirm against PTOP documentation. */
	mask = (1 << nv_rd32(priv, 0x022430)) - 1;
	mask &= ~nv_rd32(priv, 0x022504);
	mask &= ~nv_rd32(priv, 0x022584);

	ret = nouveau_perfdom_new(&priv->base, "gpc", mask, 0x180000,
				  0x1000, 0x200, nvc0_perfmon_gpc);
	if (ret)
		return ret;

	/* PART: same scheme with the partition count/disable registers. */
	mask = (1 << nv_rd32(priv, 0x022438)) - 1;
	mask &= ~nv_rd32(priv, 0x022548);
	mask &= ~nv_rd32(priv, 0x0225c8);

	ret = nouveau_perfdom_new(&priv->base, "part", mask, 0x1a0000,
				  0x1000, 0x200, nvc0_perfmon_part);
	if (ret)
		return ret;

	nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
	nv_engine(priv)->sclass =  nouveau_perfmon_sclass;
	priv->base.last = 7;	/* counter slots 0..7 are valid */
	return 0;
}
163
/* nvc0 PERFMON engine class; uses the shared reset-on-fini handler. */
struct nouveau_oclass
nvc0_perfmon_oclass = {
	.handle = NV_ENGINE(PERFMON, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_perfmon_ctor,
		.dtor = _nouveau_perfmon_dtor,
		.init = _nouveau_perfmon_init,
		.fini = nvc0_perfmon_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h
new file mode 100644
index 000000000000..f66bca484263
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h
@@ -0,0 +1,17 @@
#ifndef __NVKM_PM_NVC0_H__
#define __NVKM_PM_NVC0_H__

#include "priv.h"

/* PERFMON engine private state shared by the nvc0/nve0/nvf0 drivers. */
struct nvc0_perfmon_priv {
	struct nouveau_perfmon base;
};

/* Per-counter state for the nvc0-style counter functions. */
struct nvc0_perfmon_cntr {
	struct nouveau_perfctr base;
};

/* Counter ops and fini handler exported for reuse by nve0/nvf0. */
extern const struct nouveau_funcdom nvc0_perfctr_func;
int nvc0_perfmon_fini(struct nouveau_object *, bool);

#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c
new file mode 100644
index 000000000000..71d718c12075
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26
27/*******************************************************************************
28 * Perfmon object classes
29 ******************************************************************************/
30
31/*******************************************************************************
32 * PPM context
33 ******************************************************************************/
34
35/*******************************************************************************
36 * PPM engine/subdev functions
37 ******************************************************************************/
38
/* nve0 HUB counter domains: eight domains of varying width, each with
 * a small set of named signals (per-domain user slots plus a few host
 * MMIO/FB traffic signals); unnamed signals remain selectable by raw
 * index. */
static const struct nouveau_specdom
nve0_perfmon_hub[] = {
	{ 0x60, (const struct nouveau_specsig[]) {
		{ 0x47, "hub00_user_0" },
		{}
	}, &nvc0_perfctr_func },
	{ 0x40, (const struct nouveau_specsig[]) {
		{ 0x27, "hub01_user_0" },
		{}
	}, &nvc0_perfctr_func },
	{ 0x60, (const struct nouveau_specsig[]) {
		{ 0x47, "hub02_user_0" },
		{}
	}, &nvc0_perfctr_func },
	{ 0x60, (const struct nouveau_specsig[]) {
		{ 0x47, "hub03_user_0" },
		{}
	}, &nvc0_perfctr_func },
	{ 0x40, (const struct nouveau_specsig[]) {
		{ 0x03, "host_mmio_rd" },
		{ 0x27, "hub04_user_0" },
		{}
	}, &nvc0_perfctr_func },
	{ 0x60, (const struct nouveau_specsig[]) {
		{ 0x47, "hub05_user_0" },
		{}
	}, &nvc0_perfctr_func },
	{ 0xc0, (const struct nouveau_specsig[]) {
		{ 0x74, "host_fb_rd3x" },
		{ 0x75, "host_fb_rd3x_2" },
		{ 0xa7, "hub06_user_0" },
		{}
	}, &nvc0_perfctr_func },
	{ 0x60, (const struct nouveau_specsig[]) {
		{ 0x47, "hub07_user_0" },
		{}
	}, &nvc0_perfctr_func },
	{}
};
78
/* nve0 per-GPC counter domain: one 0xe0-signal domain with a single
 * named user signal so far. */
static const struct nouveau_specdom
nve0_perfmon_gpc[] = {
	{ 0xe0, (const struct nouveau_specsig[]) {
		{ 0xc7, "gpc00_user_0" },
		{}
	}, &nvc0_perfctr_func },
	{}
};
87
/* nve0 per-partition counter domains: two 0x60-signal domains, each
 * with one named user signal so far. */
static const struct nouveau_specdom
nve0_perfmon_part[] = {
	{ 0x60, (const struct nouveau_specsig[]) {
		{ 0x47, "part00_user_0" },
		{}
	}, &nvc0_perfctr_func },
	{ 0x60, (const struct nouveau_specsig[]) {
		{ 0x47, "part01_user_0" },
		{}
	}, &nvc0_perfctr_func },
	{}
};
100
/*
 * Construct the nve0 PERFMON engine.  Same structure as the nvc0
 * constructor — PDAEMON, HUB, per-GPC and per-PART domains — but with
 * the nve0 signal tables.
 */
static int
nve0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nvc0_perfmon_priv *priv;
	u32 mask;
	int ret;

	ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* PDAEMON */
	ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
				  nve0_perfmon_pwr);
	if (ret)
		return ret;

	/* HUB */
	ret = nouveau_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
				  nve0_perfmon_hub);
	if (ret)
		return ret;

	/* GPC: one domain instance per unit present in the mask.
	 * NOTE(review): 0x022430 presumably holds the GPC count and
	 * 0x022504/0x022584 mask off disabled (floorswept) units —
	 * confirm against PTOP documentation. */
	mask = (1 << nv_rd32(priv, 0x022430)) - 1;
	mask &= ~nv_rd32(priv, 0x022504);
	mask &= ~nv_rd32(priv, 0x022584);

	ret = nouveau_perfdom_new(&priv->base, "gpc", mask, 0x180000,
				  0x1000, 0x200, nve0_perfmon_gpc);
	if (ret)
		return ret;

	/* PART: same scheme with the partition count/disable registers. */
	mask = (1 << nv_rd32(priv, 0x022438)) - 1;
	mask &= ~nv_rd32(priv, 0x022548);
	mask &= ~nv_rd32(priv, 0x0225c8);

	ret = nouveau_perfdom_new(&priv->base, "part", mask, 0x1a0000,
				  0x1000, 0x200, nve0_perfmon_part);
	if (ret)
		return ret;

	nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
	nv_engine(priv)->sclass =  nouveau_perfmon_sclass;
	priv->base.last = 7;	/* counter slots 0..7 are valid */
	return 0;
}
152
/* nve0 PERFMON engine class; reuses the nvc0 reset-on-fini handler. */
struct nouveau_oclass
nve0_perfmon_oclass = {
	.handle = NV_ENGINE(PERFMON, 0xe0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nve0_perfmon_ctor,
		.dtor = _nouveau_perfmon_dtor,
		.init = _nouveau_perfmon_init,
		.fini = nvc0_perfmon_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c
new file mode 100644
index 000000000000..47256f78a895
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26
27/*******************************************************************************
28 * Perfmon object classes
29 ******************************************************************************/
30
31/*******************************************************************************
32 * PPM context
33 ******************************************************************************/
34
35/*******************************************************************************
36 * PPM engine/subdev functions
37 ******************************************************************************/
38
/*
 * Construct the nvf0 PERFMON engine.  Only the PDAEMON ("pwr") domain
 * is registered — unlike nvc0/nve0, no HUB/GPC/PART domains have been
 * added for this chipset yet, and priv->base.last is left at its
 * default.
 */
static int
nvf0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nvc0_perfmon_priv *priv;
	int ret;

	ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* PDAEMON: reuses the nve0 power-counter signal table. */
	ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
				  nve0_perfmon_pwr);
	if (ret)
		return ret;

	nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
	nv_engine(priv)->sclass =  nouveau_perfmon_sclass;
	return 0;
}
61
/* nvf0 PERFMON engine class; reuses the nvc0 reset-on-fini handler. */
struct nouveau_oclass
nvf0_perfmon_oclass = {
	.handle = NV_ENGINE(PERFMON, 0xf0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvf0_perfmon_ctor,
		.dtor = _nouveau_perfmon_dtor,
		.init = _nouveau_perfmon_init,
		.fini = nvc0_perfmon_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h
new file mode 100644
index 000000000000..0ac8714fe0ba
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h
@@ -0,0 +1,91 @@
1#ifndef __NVKM_PERFMON_PRIV_H__
2#define __NVKM_PERFMON_PRIV_H__
3
4#include <engine/perfmon.h>
5
6struct nouveau_perfctr {
7 struct nouveau_object base;
8 struct list_head head;
9 struct nouveau_perfsig *signal[4];
10 int slot;
11 u32 logic_op;
12 u32 clk;
13 u32 ctr;
14};
15
16extern struct nouveau_oclass nouveau_perfmon_sclass[];
17
18struct nouveau_perfctx {
19 struct nouveau_engctx base;
20};
21
22extern struct nouveau_oclass nouveau_perfmon_cclass;
23
24struct nouveau_specsig {
25 u8 signal;
26 const char *name;
27};
28
29struct nouveau_perfsig {
30 const char *name;
31};
32
33struct nouveau_perfdom;
34struct nouveau_perfctr *
35nouveau_perfsig_wrap(struct nouveau_perfmon *, const char *,
36 struct nouveau_perfdom **);
37
38struct nouveau_specdom {
39 u16 signal_nr;
40 const struct nouveau_specsig *signal;
41 const struct nouveau_funcdom *func;
42};
43
44extern const struct nouveau_specdom nva3_perfmon_pwr[];
45extern const struct nouveau_specdom nvc0_perfmon_pwr[];
46extern const struct nouveau_specdom nve0_perfmon_pwr[];
47
48struct nouveau_perfdom {
49 struct list_head head;
50 struct list_head list;
51 const struct nouveau_funcdom *func;
52 char name[32];
53 u32 addr;
54 u8 quad;
55 u32 signal_nr;
56 struct nouveau_perfsig signal[];
57};
58
59struct nouveau_funcdom {
60 void (*init)(struct nouveau_perfmon *, struct nouveau_perfdom *,
61 struct nouveau_perfctr *);
62 void (*read)(struct nouveau_perfmon *, struct nouveau_perfdom *,
63 struct nouveau_perfctr *);
64 void (*next)(struct nouveau_perfmon *, struct nouveau_perfdom *);
65};
66
67int nouveau_perfdom_new(struct nouveau_perfmon *, const char *, u32,
68 u32, u32, u32, const struct nouveau_specdom *);
69
70#define nouveau_perfmon_create(p,e,o,d) \
71 nouveau_perfmon_create_((p), (e), (o), sizeof(**d), (void **)d)
72#define nouveau_perfmon_dtor(p) ({ \
73 struct nouveau_perfmon *c = (p); \
74 _nouveau_perfmon_dtor(nv_object(c)); \
75})
76#define nouveau_perfmon_init(p) ({ \
77 struct nouveau_perfmon *c = (p); \
78 _nouveau_perfmon_init(nv_object(c)); \
79})
80#define nouveau_perfmon_fini(p,s) ({ \
81 struct nouveau_perfmon *c = (p); \
82 _nouveau_perfmon_fini(nv_object(c), (s)); \
83})
84
85int nouveau_perfmon_create_(struct nouveau_object *, struct nouveau_object *,
86 struct nouveau_oclass *, int, void **);
87void _nouveau_perfmon_dtor(struct nouveau_object *);
88int _nouveau_perfmon_init(struct nouveau_object *);
89int _nouveau_perfmon_fini(struct nouveau_object *, bool);
90
91#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index 2a859a31c30d..c571758e4a27 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -135,8 +135,8 @@ nv04_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
135 return 0; 135 return 0;
136} 136}
137 137
138struct nouveau_oclass 138struct nouveau_oclass *
139nv04_software_oclass = { 139nv04_software_oclass = &(struct nouveau_oclass) {
140 .handle = NV_ENGINE(SW, 0x04), 140 .handle = NV_ENGINE(SW, 0x04),
141 .ofuncs = &(struct nouveau_ofuncs) { 141 .ofuncs = &(struct nouveau_ofuncs) {
142 .ctor = nv04_software_ctor, 142 .ctor = nv04_software_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index a019364b1e13..a62f11a78430 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -117,8 +117,8 @@ nv10_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
117 return 0; 117 return 0;
118} 118}
119 119
120struct nouveau_oclass 120struct nouveau_oclass *
121nv10_software_oclass = { 121nv10_software_oclass = &(struct nouveau_oclass) {
122 .handle = NV_ENGINE(SW, 0x10), 122 .handle = NV_ENGINE(SW, 0x10),
123 .ofuncs = &(struct nouveau_ofuncs) { 123 .ofuncs = &(struct nouveau_ofuncs) {
124 .ctor = nv10_software_ctor, 124 .ctor = nv10_software_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index c48e74953771..b574dd4bb828 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -32,16 +32,9 @@
32 32
33#include <subdev/bar.h> 33#include <subdev/bar.h>
34 34
35#include <engine/software.h>
36#include <engine/disp.h> 35#include <engine/disp.h>
37 36
38struct nv50_software_priv { 37#include "nv50.h"
39 struct nouveau_software base;
40};
41
42struct nv50_software_chan {
43 struct nouveau_software_chan base;
44};
45 38
46/******************************************************************************* 39/*******************************************************************************
47 * software object classes 40 * software object classes
@@ -62,7 +55,7 @@ nv50_software_mthd_dma_vblsem(struct nouveau_object *object, u32 mthd,
62 55
63 if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) { 56 if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
64 struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object); 57 struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object);
65 chan->base.vblank.ctxdma = gpuobj->node->offset >> 4; 58 chan->vblank.ctxdma = gpuobj->node->offset >> 4;
66 ret = 0; 59 ret = 0;
67 } 60 }
68 nouveau_namedb_put(handle); 61 nouveau_namedb_put(handle);
@@ -74,34 +67,33 @@ nv50_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
74 void *args, u32 size) 67 void *args, u32 size)
75{ 68{
76 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent); 69 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
77 chan->base.vblank.offset = *(u32 *)args; 70 chan->vblank.offset = *(u32 *)args;
78 return 0; 71 return 0;
79} 72}
80 73
81static int 74int
82nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd, 75nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
83 void *args, u32 size) 76 void *args, u32 size)
84{ 77{
85 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent); 78 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
86 chan->base.vblank.value = *(u32 *)args; 79 chan->vblank.value = *(u32 *)args;
87 return 0; 80 return 0;
88} 81}
89 82
90static int 83int
91nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd, 84nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
92 void *args, u32 size) 85 void *args, u32 size)
93{ 86{
94 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent); 87 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
95 struct nouveau_disp *disp = nouveau_disp(object); 88 u32 head = *(u32 *)args;
96 u32 crtc = *(u32 *)args; 89 if (head >= chan->vblank.nr_event)
97 if (crtc > 1)
98 return -EINVAL; 90 return -EINVAL;
99 91
100 nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event); 92 nouveau_event_get(chan->vblank.event[head]);
101 return 0; 93 return 0;
102} 94}
103 95
104static int 96int
105nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd, 97nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
106 void *args, u32 size) 98 void *args, u32 size)
107{ 99{
@@ -132,10 +124,9 @@ nv50_software_sclass[] = {
132 ******************************************************************************/ 124 ******************************************************************************/
133 125
134static int 126static int
135nv50_software_vblsem_release(struct nouveau_eventh *event, int head) 127nv50_software_vblsem_release(void *data, int head)
136{ 128{
137 struct nouveau_software_chan *chan = 129 struct nv50_software_chan *chan = data;
138 container_of(event, struct nouveau_software_chan, vblank.event);
139 struct nv50_software_priv *priv = (void *)nv_object(chan)->engine; 130 struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
140 struct nouveau_bar *bar = nouveau_bar(priv); 131 struct nouveau_bar *bar = nouveau_bar(priv);
141 132
@@ -154,45 +145,76 @@ nv50_software_vblsem_release(struct nouveau_eventh *event, int head)
154 return NVKM_EVENT_DROP; 145 return NVKM_EVENT_DROP;
155} 146}
156 147
157static int 148void
149nv50_software_context_dtor(struct nouveau_object *object)
150{
151 struct nv50_software_chan *chan = (void *)object;
152 int i;
153
154 if (chan->vblank.event) {
155 for (i = 0; i < chan->vblank.nr_event; i++)
156 nouveau_event_ref(NULL, &chan->vblank.event[i]);
157 kfree(chan->vblank.event);
158 }
159
160 nouveau_software_context_destroy(&chan->base);
161}
162
163int
158nv50_software_context_ctor(struct nouveau_object *parent, 164nv50_software_context_ctor(struct nouveau_object *parent,
159 struct nouveau_object *engine, 165 struct nouveau_object *engine,
160 struct nouveau_oclass *oclass, void *data, u32 size, 166 struct nouveau_oclass *oclass, void *data, u32 size,
161 struct nouveau_object **pobject) 167 struct nouveau_object **pobject)
162{ 168{
169 struct nouveau_disp *pdisp = nouveau_disp(parent);
170 struct nv50_software_cclass *pclass = (void *)oclass;
163 struct nv50_software_chan *chan; 171 struct nv50_software_chan *chan;
164 int ret; 172 int ret, i;
165 173
166 ret = nouveau_software_context_create(parent, engine, oclass, &chan); 174 ret = nouveau_software_context_create(parent, engine, oclass, &chan);
167 *pobject = nv_object(chan); 175 *pobject = nv_object(chan);
168 if (ret) 176 if (ret)
169 return ret; 177 return ret;
170 178
171 chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12; 179 chan->vblank.nr_event = pdisp->vblank->index_nr;
172 chan->base.vblank.event.func = nv50_software_vblsem_release; 180 chan->vblank.event = kzalloc(chan->vblank.nr_event *
181 sizeof(*chan->vblank.event), GFP_KERNEL);
182 if (!chan->vblank.event)
183 return -ENOMEM;
184
185 for (i = 0; i < chan->vblank.nr_event; i++) {
186 ret = nouveau_event_new(pdisp->vblank, i, pclass->vblank,
187 chan, &chan->vblank.event[i]);
188 if (ret)
189 return ret;
190 }
191
192 chan->vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
173 return 0; 193 return 0;
174} 194}
175 195
176static struct nouveau_oclass 196static struct nv50_software_cclass
177nv50_software_cclass = { 197nv50_software_cclass = {
178 .handle = NV_ENGCTX(SW, 0x50), 198 .base.handle = NV_ENGCTX(SW, 0x50),
179 .ofuncs = &(struct nouveau_ofuncs) { 199 .base.ofuncs = &(struct nouveau_ofuncs) {
180 .ctor = nv50_software_context_ctor, 200 .ctor = nv50_software_context_ctor,
181 .dtor = _nouveau_software_context_dtor, 201 .dtor = _nouveau_software_context_dtor,
182 .init = _nouveau_software_context_init, 202 .init = _nouveau_software_context_init,
183 .fini = _nouveau_software_context_fini, 203 .fini = _nouveau_software_context_fini,
184 }, 204 },
205 .vblank = nv50_software_vblsem_release,
185}; 206};
186 207
187/******************************************************************************* 208/*******************************************************************************
188 * software engine/subdev functions 209 * software engine/subdev functions
189 ******************************************************************************/ 210 ******************************************************************************/
190 211
191static int 212int
192nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 213nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
193 struct nouveau_oclass *oclass, void *data, u32 size, 214 struct nouveau_oclass *oclass, void *data, u32 size,
194 struct nouveau_object **pobject) 215 struct nouveau_object **pobject)
195{ 216{
217 struct nv50_software_oclass *pclass = (void *)oclass;
196 struct nv50_software_priv *priv; 218 struct nv50_software_priv *priv;
197 int ret; 219 int ret;
198 220
@@ -201,19 +223,21 @@ nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
201 if (ret) 223 if (ret)
202 return ret; 224 return ret;
203 225
204 nv_engine(priv)->cclass = &nv50_software_cclass; 226 nv_engine(priv)->cclass = pclass->cclass;
205 nv_engine(priv)->sclass = nv50_software_sclass; 227 nv_engine(priv)->sclass = pclass->sclass;
206 nv_subdev(priv)->intr = nv04_software_intr; 228 nv_subdev(priv)->intr = nv04_software_intr;
207 return 0; 229 return 0;
208} 230}
209 231
210struct nouveau_oclass 232struct nouveau_oclass *
211nv50_software_oclass = { 233nv50_software_oclass = &(struct nv50_software_oclass) {
212 .handle = NV_ENGINE(SW, 0x50), 234 .base.handle = NV_ENGINE(SW, 0x50),
213 .ofuncs = &(struct nouveau_ofuncs) { 235 .base.ofuncs = &(struct nouveau_ofuncs) {
214 .ctor = nv50_software_ctor, 236 .ctor = nv50_software_ctor,
215 .dtor = _nouveau_software_dtor, 237 .dtor = _nouveau_software_dtor,
216 .init = _nouveau_software_init, 238 .init = _nouveau_software_init,
217 .fini = _nouveau_software_fini, 239 .fini = _nouveau_software_fini,
218 }, 240 },
219}; 241 .cclass = &nv50_software_cclass.base,
242 .sclass = nv50_software_sclass,
243}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
new file mode 100644
index 000000000000..2de370c21279
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
@@ -0,0 +1,47 @@
#ifndef __NVKM_SW_NV50_H__
#define __NVKM_SW_NV50_H__

#include <engine/software.h>

/* Per-chipset software engine class: extends the base oclass with the
 * context (cclass) and object (sclass) classes the shared constructor
 * installs. */
struct nv50_software_oclass {
	struct nouveau_oclass base;
	struct nouveau_oclass *cclass;
	struct nouveau_oclass *sclass;
};

struct nv50_software_priv {
	struct nouveau_software base;
};

int nv50_software_ctor(struct nouveau_object *, struct nouveau_object *,
		       struct nouveau_oclass *, void *, u32,
		       struct nouveau_object **);

/* Context class carrying the chipset-specific vblank event handler. */
struct nv50_software_cclass {
	struct nouveau_oclass base;
	int (*vblank)(void *, int);
};

/* Per-channel software state: one vblank event handler per display
 * head, plus the semaphore-release parameters programmed through the
 * software-class methods. */
struct nv50_software_chan {
	struct nouveau_software_chan base;
	struct {
		struct nouveau_eventh **event;	/* nr_event entries */
		int nr_event;
		u32 channel;
		u32 ctxdma;
		u64 offset;
		u32 value;
	} vblank;
};

int nv50_software_context_ctor(struct nouveau_object *,
			       struct nouveau_object *,
			       struct nouveau_oclass *, void *, u32,
			       struct nouveau_object **);
void nv50_software_context_dtor(struct nouveau_object *);

int nv50_software_mthd_vblsem_value(struct nouveau_object *, u32, void *, u32);
int nv50_software_mthd_vblsem_release(struct nouveau_object *, u32, void *, u32);
int nv50_software_mthd_flip(struct nouveau_object *, u32, void *, u32);

#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index d698e710ddd4..f9430c1bf3e5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -32,13 +32,7 @@
32#include <engine/software.h> 32#include <engine/software.h>
33#include <engine/disp.h> 33#include <engine/disp.h>
34 34
35struct nvc0_software_priv { 35#include "nv50.h"
36 struct nouveau_software base;
37};
38
39struct nvc0_software_chan {
40 struct nouveau_software_chan base;
41};
42 36
43/******************************************************************************* 37/*******************************************************************************
44 * software object classes 38 * software object classes
@@ -48,58 +42,24 @@ static int
48nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd, 42nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
49 void *args, u32 size) 43 void *args, u32 size)
50{ 44{
51 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent); 45 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
52 u64 data = *(u32 *)args; 46 u64 data = *(u32 *)args;
53 if (mthd == 0x0400) { 47 if (mthd == 0x0400) {
54 chan->base.vblank.offset &= 0x00ffffffffULL; 48 chan->vblank.offset &= 0x00ffffffffULL;
55 chan->base.vblank.offset |= data << 32; 49 chan->vblank.offset |= data << 32;
56 } else { 50 } else {
57 chan->base.vblank.offset &= 0xff00000000ULL; 51 chan->vblank.offset &= 0xff00000000ULL;
58 chan->base.vblank.offset |= data; 52 chan->vblank.offset |= data;
59 } 53 }
60 return 0; 54 return 0;
61} 55}
62 56
63static int 57static int
64nvc0_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
65 void *args, u32 size)
66{
67 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
68 chan->base.vblank.value = *(u32 *)args;
69 return 0;
70}
71
72static int
73nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
74 void *args, u32 size)
75{
76 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
77 struct nouveau_disp *disp = nouveau_disp(object);
78 u32 crtc = *(u32 *)args;
79
80 if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
81 return -EINVAL;
82
83 nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
84 return 0;
85}
86
87static int
88nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
89 void *args, u32 size)
90{
91 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
92 if (chan->base.flip)
93 return chan->base.flip(chan->base.flip_data);
94 return -EINVAL;
95}
96
97static int
98nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd, 58nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd,
99 void *args, u32 size) 59 void *args, u32 size)
100{ 60{
101 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent); 61 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
102 struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine; 62 struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
103 u32 data = *(u32 *)args; 63 u32 data = *(u32 *)args;
104 64
105 switch (mthd) { 65 switch (mthd) {
@@ -124,9 +84,9 @@ static struct nouveau_omthds
124nvc0_software_omthds[] = { 84nvc0_software_omthds[] = {
125 { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset }, 85 { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
126 { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset }, 86 { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
127 { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value }, 87 { 0x0408, 0x0408, nv50_software_mthd_vblsem_value },
128 { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release }, 88 { 0x040c, 0x040c, nv50_software_mthd_vblsem_release },
129 { 0x0500, 0x0500, nvc0_software_mthd_flip }, 89 { 0x0500, 0x0500, nv50_software_mthd_flip },
130 { 0x0600, 0x0600, nvc0_software_mthd_mp_control }, 90 { 0x0600, 0x0600, nvc0_software_mthd_mp_control },
131 { 0x0644, 0x0644, nvc0_software_mthd_mp_control }, 91 { 0x0644, 0x0644, nvc0_software_mthd_mp_control },
132 { 0x06ac, 0x06ac, nvc0_software_mthd_mp_control }, 92 { 0x06ac, 0x06ac, nvc0_software_mthd_mp_control },
@@ -144,11 +104,10 @@ nvc0_software_sclass[] = {
144 ******************************************************************************/ 104 ******************************************************************************/
145 105
146static int 106static int
147nvc0_software_vblsem_release(struct nouveau_eventh *event, int head) 107nvc0_software_vblsem_release(void *data, int head)
148{ 108{
149 struct nouveau_software_chan *chan = 109 struct nv50_software_chan *chan = data;
150 container_of(event, struct nouveau_software_chan, vblank.event); 110 struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
151 struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
152 struct nouveau_bar *bar = nouveau_bar(priv); 111 struct nouveau_bar *bar = nouveau_bar(priv);
153 112
154 nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel); 113 nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
@@ -160,66 +119,31 @@ nvc0_software_vblsem_release(struct nouveau_eventh *event, int head)
160 return NVKM_EVENT_DROP; 119 return NVKM_EVENT_DROP;
161} 120}
162 121
163static int 122static struct nv50_software_cclass
164nvc0_software_context_ctor(struct nouveau_object *parent,
165 struct nouveau_object *engine,
166 struct nouveau_oclass *oclass, void *data, u32 size,
167 struct nouveau_object **pobject)
168{
169 struct nvc0_software_chan *chan;
170 int ret;
171
172 ret = nouveau_software_context_create(parent, engine, oclass, &chan);
173 *pobject = nv_object(chan);
174 if (ret)
175 return ret;
176
177 chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
178 chan->base.vblank.event.func = nvc0_software_vblsem_release;
179 return 0;
180}
181
182static struct nouveau_oclass
183nvc0_software_cclass = { 123nvc0_software_cclass = {
184 .handle = NV_ENGCTX(SW, 0xc0), 124 .base.handle = NV_ENGCTX(SW, 0xc0),
185 .ofuncs = &(struct nouveau_ofuncs) { 125 .base.ofuncs = &(struct nouveau_ofuncs) {
186 .ctor = nvc0_software_context_ctor, 126 .ctor = nv50_software_context_ctor,
187 .dtor = _nouveau_software_context_dtor, 127 .dtor = _nouveau_software_context_dtor,
188 .init = _nouveau_software_context_init, 128 .init = _nouveau_software_context_init,
189 .fini = _nouveau_software_context_fini, 129 .fini = _nouveau_software_context_fini,
190 }, 130 },
131 .vblank = nvc0_software_vblsem_release,
191}; 132};
192 133
193/******************************************************************************* 134/*******************************************************************************
194 * software engine/subdev functions 135 * software engine/subdev functions
195 ******************************************************************************/ 136 ******************************************************************************/
196 137
197static int 138struct nouveau_oclass *
198nvc0_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 139nvc0_software_oclass = &(struct nv50_software_oclass) {
199 struct nouveau_oclass *oclass, void *data, u32 size, 140 .base.handle = NV_ENGINE(SW, 0xc0),
200 struct nouveau_object **pobject) 141 .base.ofuncs = &(struct nouveau_ofuncs) {
201{ 142 .ctor = nv50_software_ctor,
202 struct nvc0_software_priv *priv;
203 int ret;
204
205 ret = nouveau_software_create(parent, engine, oclass, &priv);
206 *pobject = nv_object(priv);
207 if (ret)
208 return ret;
209
210 nv_engine(priv)->cclass = &nvc0_software_cclass;
211 nv_engine(priv)->sclass = nvc0_software_sclass;
212 nv_subdev(priv)->intr = nv04_software_intr;
213 return 0;
214}
215
216struct nouveau_oclass
217nvc0_software_oclass = {
218 .handle = NV_ENGINE(SW, 0xc0),
219 .ofuncs = &(struct nouveau_ofuncs) {
220 .ctor = nvc0_software_ctor,
221 .dtor = _nouveau_software_dtor, 143 .dtor = _nouveau_software_dtor,
222 .init = _nouveau_software_init, 144 .init = _nouveau_software_init,
223 .fini = _nouveau_software_fini, 145 .fini = _nouveau_software_fini,
224 }, 146 },
225}; 147 .cclass = &nvc0_software_cclass.base,
148 .sclass = nvc0_software_sclass,
149}.base;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 5a5961b6a6a3..560c3593dae7 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -22,7 +22,7 @@
22#define NV_DEVICE_DISABLE_PPP 0x0000004000000000ULL 22#define NV_DEVICE_DISABLE_PPP 0x0000004000000000ULL
23#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL 23#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL
24#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL 24#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL
25#define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL 25#define NV_DEVICE_DISABLE_VIC 0x0000020000000000ULL
26#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL 26#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL
27 27
28struct nv_device_class { 28struct nv_device_class {
@@ -98,6 +98,77 @@ struct nv_dma_class {
98 u32 conf0; 98 u32 conf0;
99}; 99};
100 100
101/* Perfmon counter class
102 *
103 * XXXX: NV_PERFCTR
104 */
105#define NV_PERFCTR_CLASS 0x0000ffff
106#define NV_PERFCTR_QUERY 0x00000000
107#define NV_PERFCTR_SAMPLE 0x00000001
108#define NV_PERFCTR_READ 0x00000002
109
110struct nv_perfctr_class {
111 u16 logic_op;
112 struct {
113 char __user *name; /*XXX: use cfu when exposed to userspace */
114 u32 size;
115 } signal[4];
116};
117
118struct nv_perfctr_query {
119 u32 iter;
120 u32 size;
121 char __user *name; /*XXX: use ctu when exposed to userspace */
122};
123
124struct nv_perfctr_sample {
125};
126
127struct nv_perfctr_read {
128 u32 ctr;
129 u32 clk;
130};
131
132/* Device control class
133 *
134 * XXXX: NV_CONTROL
135 */
136#define NV_CONTROL_CLASS 0x0000fffe
137
138#define NV_CONTROL_PSTATE_INFO 0x00000000
139#define NV_CONTROL_PSTATE_INFO_USTATE_DISABLE (-1)
140#define NV_CONTROL_PSTATE_INFO_USTATE_PERFMON (-2)
141#define NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN (-1)
142#define NV_CONTROL_PSTATE_INFO_PSTATE_PERFMON (-2)
143#define NV_CONTROL_PSTATE_ATTR 0x00000001
144#define NV_CONTROL_PSTATE_ATTR_STATE_CURRENT (-1)
145#define NV_CONTROL_PSTATE_USER 0x00000002
146#define NV_CONTROL_PSTATE_USER_STATE_UNKNOWN (-1)
147#define NV_CONTROL_PSTATE_USER_STATE_PERFMON (-2)
148
149struct nv_control_pstate_info {
150 u32 count; /* out: number of power states */
151 s32 ustate; /* out: current target pstate index */
152 u32 pstate; /* out: current pstate index */
153};
154
155struct nv_control_pstate_attr {
156 s32 state; /* in: index of pstate to query
157 * out: pstate identifier
158 */
159 u32 index; /* in: index of attribute to query
160 * out: index of next attribute, or 0 if no more
161 */
162 char name[32];
163 char unit[16];
164 u32 min;
165 u32 max;
166};
167
168struct nv_control_pstate_user {
169 s32 state; /* in: pstate identifier */
170};
171
101/* DMA FIFO channel classes 172/* DMA FIFO channel classes
102 * 173 *
103 * 006b: NV03_CHANNEL_DMA 174 * 006b: NV03_CHANNEL_DMA
diff --git a/drivers/gpu/drm/nouveau/core/include/core/debug.h b/drivers/gpu/drm/nouveau/core/include/core/debug.h
index 9ea18dfcb4d0..8092e2e90323 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/debug.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/debug.h
@@ -1,13 +1,20 @@
1#ifndef __NOUVEAU_DEBUG_H__ 1#ifndef __NOUVEAU_DEBUG_H__
2#define __NOUVEAU_DEBUG_H__ 2#define __NOUVEAU_DEBUG_H__
3 3
4extern int nv_info_debug_level;
5
4#define NV_DBG_FATAL 0 6#define NV_DBG_FATAL 0
5#define NV_DBG_ERROR 1 7#define NV_DBG_ERROR 1
6#define NV_DBG_WARN 2 8#define NV_DBG_WARN 2
7#define NV_DBG_INFO 3 9#define NV_DBG_INFO nv_info_debug_level
8#define NV_DBG_DEBUG 4 10#define NV_DBG_DEBUG 4
9#define NV_DBG_TRACE 5 11#define NV_DBG_TRACE 5
10#define NV_DBG_PARANOIA 6 12#define NV_DBG_PARANOIA 6
11#define NV_DBG_SPAM 7 13#define NV_DBG_SPAM 7
12 14
15#define NV_DBG_INFO_NORMAL 3
16#define NV_DBG_INFO_SILENT NV_DBG_DEBUG
17
18#define nv_debug_level(a) nv_info_debug_level = NV_DBG_INFO_##a
19
13#endif 20#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index 99b6600fe80a..ac2881d1776a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -33,9 +33,10 @@ enum nv_subdev_type {
33 NVDEV_SUBDEV_INSTMEM, 33 NVDEV_SUBDEV_INSTMEM,
34 NVDEV_SUBDEV_VM, 34 NVDEV_SUBDEV_VM,
35 NVDEV_SUBDEV_BAR, 35 NVDEV_SUBDEV_BAR,
36 NVDEV_SUBDEV_PWR,
36 NVDEV_SUBDEV_VOLT, 37 NVDEV_SUBDEV_VOLT,
37 NVDEV_SUBDEV_CLOCK,
38 NVDEV_SUBDEV_THERM, 38 NVDEV_SUBDEV_THERM,
39 NVDEV_SUBDEV_CLOCK,
39 40
40 NVDEV_ENGINE_DMAOBJ, 41 NVDEV_ENGINE_DMAOBJ,
41 NVDEV_ENGINE_FIFO, 42 NVDEV_ENGINE_FIFO,
@@ -50,9 +51,10 @@ enum nv_subdev_type {
50 NVDEV_ENGINE_COPY0, 51 NVDEV_ENGINE_COPY0,
51 NVDEV_ENGINE_COPY1, 52 NVDEV_ENGINE_COPY1,
52 NVDEV_ENGINE_COPY2, 53 NVDEV_ENGINE_COPY2,
53 NVDEV_ENGINE_UNK1C1, 54 NVDEV_ENGINE_VIC,
54 NVDEV_ENGINE_VENC, 55 NVDEV_ENGINE_VENC,
55 NVDEV_ENGINE_DISP, 56 NVDEV_ENGINE_DISP,
57 NVDEV_ENGINE_PERFMON,
56 58
57 NVDEV_SUBDEV_NR, 59 NVDEV_SUBDEV_NR,
58}; 60};
@@ -72,6 +74,7 @@ struct nouveau_device {
72 enum { 74 enum {
73 NV_04 = 0x04, 75 NV_04 = 0x04,
74 NV_10 = 0x10, 76 NV_10 = 0x10,
77 NV_11 = 0x11,
75 NV_20 = 0x20, 78 NV_20 = 0x20,
76 NV_30 = 0x30, 79 NV_30 = 0x30,
77 NV_40 = 0x40, 80 NV_40 = 0x40,
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
index 9e094408f14e..5d539ebff3ed 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/event.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -5,13 +5,21 @@
5#define NVKM_EVENT_DROP 0 5#define NVKM_EVENT_DROP 0
6#define NVKM_EVENT_KEEP 1 6#define NVKM_EVENT_KEEP 1
7 7
8/* nouveau_eventh.flags bit #s */
9#define NVKM_EVENT_ENABLE 0
10
8struct nouveau_eventh { 11struct nouveau_eventh {
12 struct nouveau_event *event;
9 struct list_head head; 13 struct list_head head;
10 int (*func)(struct nouveau_eventh *, int index); 14 unsigned long flags;
15 int index;
16 int (*func)(void *, int);
17 void *priv;
11}; 18};
12 19
13struct nouveau_event { 20struct nouveau_event {
14 spinlock_t lock; 21 spinlock_t list_lock;
22 spinlock_t refs_lock;
15 23
16 void *priv; 24 void *priv;
17 void (*enable)(struct nouveau_event *, int index); 25 void (*enable)(struct nouveau_event *, int index);
@@ -28,9 +36,11 @@ int nouveau_event_create(int index_nr, struct nouveau_event **);
28void nouveau_event_destroy(struct nouveau_event **); 36void nouveau_event_destroy(struct nouveau_event **);
29void nouveau_event_trigger(struct nouveau_event *, int index); 37void nouveau_event_trigger(struct nouveau_event *, int index);
30 38
31void nouveau_event_get(struct nouveau_event *, int index, 39int nouveau_event_new(struct nouveau_event *, int index,
32 struct nouveau_eventh *); 40 int (*func)(void *, int), void *,
33void nouveau_event_put(struct nouveau_event *, int index, 41 struct nouveau_eventh **);
34 struct nouveau_eventh *); 42void nouveau_event_ref(struct nouveau_eventh *, struct nouveau_eventh **);
43void nouveau_event_get(struct nouveau_eventh *);
44void nouveau_event_put(struct nouveau_eventh *);
35 45
36#endif 46#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/option.h b/drivers/gpu/drm/nouveau/core/include/core/option.h
index 27074957fd21..ed055847887e 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/option.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/option.h
@@ -8,4 +8,13 @@ bool nouveau_boolopt(const char *optstr, const char *opt, bool value);
8 8
9int nouveau_dbgopt(const char *optstr, const char *sub); 9int nouveau_dbgopt(const char *optstr, const char *sub);
10 10
11/* compares unterminated string 'str' with zero-terminated string 'cmp' */
12static inline int
13strncasecmpz(const char *str, const char *cmp, size_t len)
14{
15 if (strlen(cmp) != len)
16 return len;
17 return strncasecmp(str, cmp, len);
18}
19
11#endif 20#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index d87836e3a704..0f9a37bd32b0 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -6,27 +6,12 @@
6 6
7struct nouveau_object; 7struct nouveau_object;
8 8
9#define NV_PRINTK_FATAL KERN_CRIT 9void __printf(3, 4)
10#define NV_PRINTK_ERROR KERN_ERR 10nv_printk_(struct nouveau_object *, int, const char *, ...);
11#define NV_PRINTK_WARN KERN_WARNING
12#define NV_PRINTK_INFO KERN_INFO
13#define NV_PRINTK_DEBUG KERN_DEBUG
14#define NV_PRINTK_PARANOIA KERN_DEBUG
15#define NV_PRINTK_TRACE KERN_DEBUG
16#define NV_PRINTK_SPAM KERN_DEBUG
17
18extern int nv_printk_suspend_level;
19
20#define NV_DBG_SUSPEND (nv_printk_suspend_level)
21#define NV_PRINTK_SUSPEND (nv_printk_level_to_pfx(nv_printk_suspend_level))
22
23const char *nv_printk_level_to_pfx(int level);
24void __printf(4, 5)
25nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
26 11
27#define nv_printk(o,l,f,a...) do { \ 12#define nv_printk(o,l,f,a...) do { \
28 if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \ 13 if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \
29 nv_printk_(nv_object(o), NV_PRINTK_##l, NV_DBG_##l, f, ##a); \ 14 nv_printk_(nv_object(o), NV_DBG_##l, f, ##a); \
30} while(0) 15} while(0)
31 16
32#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a) 17#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a)
@@ -37,16 +22,9 @@ nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
37#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a) 22#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
38#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a) 23#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
39 24
40#define nv_suspend(o,f,a...) nv_printk((o), SUSPEND, f, ##a)
41
42static inline void nv_suspend_set_printk_level(int level)
43{
44 nv_printk_suspend_level = level;
45}
46
47#define nv_assert(f,a...) do { \ 25#define nv_assert(f,a...) do { \
48 if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \ 26 if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
49 nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a); \ 27 nv_printk_(NULL, NV_DBG_FATAL, f "\n", ##a); \
50 BUG_ON(1); \ 28 BUG_ON(1); \
51} while(0) 29} while(0)
52 30
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index 633c2f806482..8c32cf4d83c7 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -101,14 +101,14 @@ nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid);
101#define _nouveau_fifo_init _nouveau_engine_init 101#define _nouveau_fifo_init _nouveau_engine_init
102#define _nouveau_fifo_fini _nouveau_engine_fini 102#define _nouveau_fifo_fini _nouveau_engine_fini
103 103
104extern struct nouveau_oclass nv04_fifo_oclass; 104extern struct nouveau_oclass *nv04_fifo_oclass;
105extern struct nouveau_oclass nv10_fifo_oclass; 105extern struct nouveau_oclass *nv10_fifo_oclass;
106extern struct nouveau_oclass nv17_fifo_oclass; 106extern struct nouveau_oclass *nv17_fifo_oclass;
107extern struct nouveau_oclass nv40_fifo_oclass; 107extern struct nouveau_oclass *nv40_fifo_oclass;
108extern struct nouveau_oclass nv50_fifo_oclass; 108extern struct nouveau_oclass *nv50_fifo_oclass;
109extern struct nouveau_oclass nv84_fifo_oclass; 109extern struct nouveau_oclass *nv84_fifo_oclass;
110extern struct nouveau_oclass nvc0_fifo_oclass; 110extern struct nouveau_oclass *nvc0_fifo_oclass;
111extern struct nouveau_oclass nve0_fifo_oclass; 111extern struct nouveau_oclass *nve0_fifo_oclass;
112 112
113void nv04_fifo_intr(struct nouveau_subdev *); 113void nv04_fifo_intr(struct nouveau_subdev *);
114int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *); 114int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
index 1d1a89a06ee4..9b0d938199f6 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
@@ -42,10 +42,13 @@ struct nouveau_mpeg {
42 42
43extern struct nouveau_oclass nv31_mpeg_oclass; 43extern struct nouveau_oclass nv31_mpeg_oclass;
44extern struct nouveau_oclass nv40_mpeg_oclass; 44extern struct nouveau_oclass nv40_mpeg_oclass;
45extern struct nouveau_oclass nv44_mpeg_oclass;
45extern struct nouveau_oclass nv50_mpeg_oclass; 46extern struct nouveau_oclass nv50_mpeg_oclass;
46extern struct nouveau_oclass nv84_mpeg_oclass; 47extern struct nouveau_oclass nv84_mpeg_oclass;
47 48extern struct nouveau_ofuncs nv31_mpeg_ofuncs;
49extern struct nouveau_oclass nv31_mpeg_cclass;
48extern struct nouveau_oclass nv31_mpeg_sclass[]; 50extern struct nouveau_oclass nv31_mpeg_sclass[];
51extern struct nouveau_oclass nv40_mpeg_sclass[];
49void nv31_mpeg_intr(struct nouveau_subdev *); 52void nv31_mpeg_intr(struct nouveau_subdev *);
50void nv31_mpeg_tile_prog(struct nouveau_engine *, int); 53void nv31_mpeg_tile_prog(struct nouveau_engine *, int);
51int nv31_mpeg_init(struct nouveau_object *); 54int nv31_mpeg_init(struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
new file mode 100644
index 000000000000..49b0024910fe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
@@ -0,0 +1,39 @@
1#ifndef __NVKM_PERFMON_H__
2#define __NVKM_PERFMON_H__
3
4#include <core/device.h>
5#include <core/engine.h>
6#include <core/engctx.h>
7#include <core/class.h>
8
9struct nouveau_perfdom;
10struct nouveau_perfctr;
11struct nouveau_perfmon {
12 struct nouveau_engine base;
13
14 struct nouveau_perfctx *context;
15 void *profile_data;
16
17 struct list_head domains;
18 u32 sequence;
19
20 /*XXX: temp for daemon backend */
21 u32 pwr[8];
22 u32 last;
23};
24
25static inline struct nouveau_perfmon *
26nouveau_perfmon(void *obj)
27{
28 return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_PERFMON];
29}
30
31extern struct nouveau_oclass *nv40_perfmon_oclass;
32extern struct nouveau_oclass *nv50_perfmon_oclass;
33extern struct nouveau_oclass *nv84_perfmon_oclass;
34extern struct nouveau_oclass *nva3_perfmon_oclass;
35extern struct nouveau_oclass nvc0_perfmon_oclass;
36extern struct nouveau_oclass nve0_perfmon_oclass;
37extern struct nouveau_oclass nvf0_perfmon_oclass;
38
39#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
index 45799487e573..23a462b50d03 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/software.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -3,19 +3,10 @@
3 3
4#include <core/engine.h> 4#include <core/engine.h>
5#include <core/engctx.h> 5#include <core/engctx.h>
6#include <core/event.h>
7 6
8struct nouveau_software_chan { 7struct nouveau_software_chan {
9 struct nouveau_engctx base; 8 struct nouveau_engctx base;
10 9
11 struct {
12 struct nouveau_eventh event;
13 u32 channel;
14 u32 ctxdma;
15 u64 offset;
16 u32 value;
17 } vblank;
18
19 int (*flip)(void *); 10 int (*flip)(void *);
20 void *flip_data; 11 void *flip_data;
21}; 12};
@@ -50,10 +41,10 @@ struct nouveau_software {
50#define _nouveau_software_init _nouveau_engine_init 41#define _nouveau_software_init _nouveau_engine_init
51#define _nouveau_software_fini _nouveau_engine_fini 42#define _nouveau_software_fini _nouveau_engine_fini
52 43
53extern struct nouveau_oclass nv04_software_oclass; 44extern struct nouveau_oclass *nv04_software_oclass;
54extern struct nouveau_oclass nv10_software_oclass; 45extern struct nouveau_oclass *nv10_software_oclass;
55extern struct nouveau_oclass nv50_software_oclass; 46extern struct nouveau_oclass *nv50_software_oclass;
56extern struct nouveau_oclass nvc0_software_oclass; 47extern struct nouveau_oclass *nvc0_software_oclass;
57 48
58void nv04_software_intr(struct nouveau_subdev *); 49void nv04_software_intr(struct nouveau_subdev *);
59 50
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h
new file mode 100644
index 000000000000..662b20726851
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h
@@ -0,0 +1,29 @@
1#ifndef __NVBIOS_BOOST_H__
2#define __NVBIOS_BOOST_H__
3
4u16 nvbios_boostTe(struct nouveau_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
5
6struct nvbios_boostE {
7 u8 pstate;
8 u32 min;
9 u32 max;
10};
11
12u16 nvbios_boostEe(struct nouveau_bios *, int idx, u8 *, u8 *, u8 *, u8 *);
13u16 nvbios_boostEp(struct nouveau_bios *, int idx, u8 *, u8 *, u8 *, u8 *,
14 struct nvbios_boostE *);
15u16 nvbios_boostEm(struct nouveau_bios *, u8, u8 *, u8 *, u8 *, u8 *,
16 struct nvbios_boostE *);
17
18struct nvbios_boostS {
19 u8 domain;
20 u8 percent;
21 u32 min;
22 u32 max;
23};
24
25u16 nvbios_boostSe(struct nouveau_bios *, int, u16, u8 *, u8 *, u8, u8);
26u16 nvbios_boostSp(struct nouveau_bios *, int, u16, u8 *, u8 *, u8, u8,
27 struct nvbios_boostS *);
28
29#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h
new file mode 100644
index 000000000000..a80a43809883
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h
@@ -0,0 +1,28 @@
1#ifndef __NVBIOS_CSTEP_H__
2#define __NVBIOS_CSTEP_H__
3
4u16 nvbios_cstepTe(struct nouveau_bios *,
5 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz);
6
7struct nvbios_cstepE {
8 u8 pstate;
9 u8 index;
10};
11
12u16 nvbios_cstepEe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
13u16 nvbios_cstepEp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
14 struct nvbios_cstepE *);
15u16 nvbios_cstepEm(struct nouveau_bios *, u8 pstate, u8 *ver, u8 *hdr,
16 struct nvbios_cstepE *);
17
18struct nvbios_cstepX {
19 u32 freq;
20 u8 unkn[2];
21 u8 voltage;
22};
23
24u16 nvbios_cstepXe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
25u16 nvbios_cstepXp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
26 struct nvbios_cstepX *);
27
28#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index 96d3364f6db3..c7b2e586be0b 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -7,7 +7,15 @@ enum dcb_gpio_func_name {
7 DCB_GPIO_TVDAC1 = 0x2d, 7 DCB_GPIO_TVDAC1 = 0x2d,
8 DCB_GPIO_FAN = 0x09, 8 DCB_GPIO_FAN = 0x09,
9 DCB_GPIO_FAN_SENSE = 0x3d, 9 DCB_GPIO_FAN_SENSE = 0x3d,
10 DCB_GPIO_UNUSED = 0xff 10 DCB_GPIO_UNUSED = 0xff,
11 DCB_GPIO_VID0 = 0x04,
12 DCB_GPIO_VID1 = 0x05,
13 DCB_GPIO_VID2 = 0x06,
14 DCB_GPIO_VID3 = 0x1a,
15 DCB_GPIO_VID4 = 0x73,
16 DCB_GPIO_VID5 = 0x74,
17 DCB_GPIO_VID6 = 0x75,
18 DCB_GPIO_VID7 = 0x76,
11}; 19};
12 20
13#define DCB_GPIO_LOG_DIR 0x02 21#define DCB_GPIO_LOG_DIR 0x02
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
index 0b285e99be5a..16ff06ec2a88 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
@@ -3,6 +3,39 @@
3 3
4struct nouveau_bios; 4struct nouveau_bios;
5 5
6u16 nvbios_perf_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
7 u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
8
9struct nvbios_perfE {
10 u8 pstate;
11 u8 fanspeed;
12 u8 voltage;
13 u32 core;
14 u32 shader;
15 u32 memory;
16 u32 vdec;
17 u32 disp;
18 u32 script;
19};
20
21u16 nvbios_perf_entry(struct nouveau_bios *, int idx,
22 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
23u16 nvbios_perfEp(struct nouveau_bios *, int idx,
24 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_perfE *);
25
26struct nvbios_perfS {
27 union {
28 struct {
29 u32 freq;
30 } v40;
31 };
32};
33
34u32 nvbios_perfSe(struct nouveau_bios *, u32 data, int idx,
35 u8 *ver, u8 *hdr, u8 cnt, u8 len);
36u32 nvbios_perfSp(struct nouveau_bios *, u32 data, int idx,
37 u8 *ver, u8 *hdr, u8 cnt, u8 len, struct nvbios_perfS *);
38
6struct nvbios_perf_fan { 39struct nvbios_perf_fan {
7 u32 pwm_divisor; 40 u32 pwm_divisor;
8}; 41};
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
new file mode 100644
index 000000000000..bc15e0320877
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
@@ -0,0 +1,11 @@
1#ifndef __NVBIOS_RAMMAP_H__
2#define __NVBIOS_RAMMAP_H__
3
4u16 nvbios_rammap_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
5 u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
6u16 nvbios_rammap_entry(struct nouveau_bios *, int idx,
7 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
8u16 nvbios_rammap_match(struct nouveau_bios *, u16 khz,
9 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
10
11#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
new file mode 100644
index 000000000000..963694b54224
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
@@ -0,0 +1,8 @@
1#ifndef __NVBIOS_TIMING_H__
2#define __NVBIOS_TIMING_H__
3
4u16 nvbios_timing_table(struct nouveau_bios *,
5 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
6u16 nvbios_timing_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
7
8#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h
new file mode 100644
index 000000000000..ad5a8f20e113
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h
@@ -0,0 +1,25 @@
1#ifndef __NVBIOS_VMAP_H__
2#define __NVBIOS_VMAP_H__
3
4struct nouveau_bios;
5
6struct nvbios_vmap {
7};
8
9u16 nvbios_vmap_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
10u16 nvbios_vmap_parse(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
11 struct nvbios_vmap *);
12
13struct nvbios_vmap_entry {
14 u8 unk0;
15 u8 link;
16 u32 min;
17 u32 max;
18 s32 arg[6];
19};
20
21u16 nvbios_vmap_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *len);
22u16 nvbios_vmap_entry_parse(struct nouveau_bios *, int idx, u8 *ver, u8 *len,
23 struct nvbios_vmap_entry *);
24
25#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h
new file mode 100644
index 000000000000..6a11dcd59770
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h
@@ -0,0 +1,27 @@
1#ifndef __NVBIOS_VOLT_H__
2#define __NVBIOS_VOLT_H__
3
4struct nouveau_bios;
5
6struct nvbios_volt {
7 u8 vidmask;
8 u32 min;
9 u32 max;
10 u32 base;
11 s16 step;
12};
13
14u16 nvbios_volt_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
15u16 nvbios_volt_parse(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
16 struct nvbios_volt *);
17
18struct nvbios_volt_entry {
19 u32 voltage;
20 u8 vid;
21};
22
23u16 nvbios_volt_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *len);
24u16 nvbios_volt_entry_parse(struct nouveau_bios *, int idx, u8 *ver, u8 *len,
25 struct nvbios_volt_entry *);
26
27#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
index 7d88ec4a6d06..697f7ce70aab 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
@@ -11,6 +11,8 @@ struct nouveau_bus_intr {
11 11
12struct nouveau_bus { 12struct nouveau_bus {
13 struct nouveau_subdev base; 13 struct nouveau_subdev base;
14 int (*hwsq_exec)(struct nouveau_bus *, u32 *, u32);
15 u32 hwsq_size;
14}; 16};
15 17
16static inline struct nouveau_bus * 18static inline struct nouveau_bus *
@@ -33,9 +35,19 @@ nouveau_bus(void *obj)
33#define _nouveau_bus_init _nouveau_subdev_init 35#define _nouveau_bus_init _nouveau_subdev_init
34#define _nouveau_bus_fini _nouveau_subdev_fini 36#define _nouveau_bus_fini _nouveau_subdev_fini
35 37
36extern struct nouveau_oclass nv04_bus_oclass; 38extern struct nouveau_oclass *nv04_bus_oclass;
37extern struct nouveau_oclass nv31_bus_oclass; 39extern struct nouveau_oclass *nv31_bus_oclass;
38extern struct nouveau_oclass nv50_bus_oclass; 40extern struct nouveau_oclass *nv50_bus_oclass;
39extern struct nouveau_oclass nvc0_bus_oclass; 41extern struct nouveau_oclass *nv94_bus_oclass;
42extern struct nouveau_oclass *nvc0_bus_oclass;
43
44/* interface to sequencer */
45struct nouveau_hwsq;
46int nouveau_hwsq_init(struct nouveau_bus *, struct nouveau_hwsq **);
47int nouveau_hwsq_fini(struct nouveau_hwsq **, bool exec);
48void nouveau_hwsq_wr32(struct nouveau_hwsq *, u32 addr, u32 data);
49void nouveau_hwsq_setf(struct nouveau_hwsq *, u8 flag, int data);
50void nouveau_hwsq_wait(struct nouveau_hwsq *, u8 flag, u8 data);
51void nouveau_hwsq_nsec(struct nouveau_hwsq *, u32 nsec);
40 52
41#endif 53#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index 89ee289097a6..e2675bc0edba 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -7,9 +7,78 @@
7struct nouveau_pll_vals; 7struct nouveau_pll_vals;
8struct nvbios_pll; 8struct nvbios_pll;
9 9
10enum nv_clk_src {
11 nv_clk_src_crystal,
12 nv_clk_src_href,
13
14 nv_clk_src_hclk,
15 nv_clk_src_hclkm3,
16 nv_clk_src_hclkm3d2,
17
18 nv_clk_src_host,
19
20 nv_clk_src_sppll0,
21 nv_clk_src_sppll1,
22
23 nv_clk_src_mpllsrcref,
24 nv_clk_src_mpllsrc,
25 nv_clk_src_mpll,
26 nv_clk_src_mdiv,
27
28 nv_clk_src_core,
29 nv_clk_src_shader,
30
31 nv_clk_src_mem,
32
33 nv_clk_src_gpc,
34 nv_clk_src_rop,
35 nv_clk_src_hubk01,
36 nv_clk_src_hubk06,
37 nv_clk_src_hubk07,
38 nv_clk_src_copy,
39 nv_clk_src_daemon,
40 nv_clk_src_disp,
41 nv_clk_src_vdec,
42
43 nv_clk_src_dom6,
44
45 nv_clk_src_max,
46};
47
48struct nouveau_cstate {
49 struct list_head head;
50 u8 voltage;
51 u32 domain[nv_clk_src_max];
52};
53
54struct nouveau_pstate {
55 struct list_head head;
56 struct list_head list; /* c-states */
57 struct nouveau_cstate base;
58 u8 pstate;
59 u8 fanspeed;
60};
61
10struct nouveau_clock { 62struct nouveau_clock {
11 struct nouveau_subdev base; 63 struct nouveau_subdev base;
12 64
65 struct nouveau_clocks *domains;
66 struct nouveau_pstate bstate;
67
68 struct list_head states;
69 int state_nr;
70
71 int pstate; /* current */
72 int ustate; /* user-requested (-1 disabled, -2 perfmon) */
73 int astate; /* perfmon adjustment (base) */
74 int tstate; /* thermal adjustment (max-) */
75 int dstate; /* display adjustment (min+) */
76
77 int (*read)(struct nouveau_clock *, enum nv_clk_src);
78 int (*calc)(struct nouveau_clock *, struct nouveau_cstate *);
79 int (*prog)(struct nouveau_clock *);
80 void (*tidy)(struct nouveau_clock *);
81
13 /*XXX: die, these are here *only* to support the completely 82 /*XXX: die, these are here *only* to support the completely
14 * bat-shit insane what-was-nouveau_hw.c code 83 * bat-shit insane what-was-nouveau_hw.c code
15 */ 84 */
@@ -25,27 +94,42 @@ nouveau_clock(void *obj)
25 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_CLOCK]; 94 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_CLOCK];
26} 95}
27 96
28#define nouveau_clock_create(p,e,o,d) \ 97struct nouveau_clocks {
29 nouveau_subdev_create((p), (e), (o), 0, "CLOCK", "clock", d) 98 enum nv_clk_src name;
30#define nouveau_clock_destroy(p) \ 99 u8 bios; /* 0xff for none */
31 nouveau_subdev_destroy(&(p)->base) 100#define NVKM_CLK_DOM_FLAG_CORE 0x01
32#define nouveau_clock_init(p) \ 101 u8 flags;
33 nouveau_subdev_init(&(p)->base) 102 const char *mname;
103 int mdiv;
104};
105
106#define nouveau_clock_create(p,e,o,i,d) \
107 nouveau_clock_create_((p), (e), (o), (i), sizeof(**d), (void **)d)
108#define nouveau_clock_destroy(p) ({ \
109 struct nouveau_clock *clk = (p); \
110 _nouveau_clock_dtor(nv_object(clk)); \
111})
112#define nouveau_clock_init(p) ({ \
113 struct nouveau_clock *clk = (p); \
114 _nouveau_clock_init(nv_object(clk)); \
115})
34#define nouveau_clock_fini(p,s) \ 116#define nouveau_clock_fini(p,s) \
35 nouveau_subdev_fini(&(p)->base, (s)) 117 nouveau_subdev_fini(&(p)->base, (s))
36 118
37int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *, 119int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
38 struct nouveau_oclass *, void *, u32, int, void **); 120 struct nouveau_oclass *,
39 121 struct nouveau_clocks *, int, void **);
40#define _nouveau_clock_dtor _nouveau_subdev_dtor 122void _nouveau_clock_dtor(struct nouveau_object *);
41#define _nouveau_clock_init _nouveau_subdev_init 123int _nouveau_clock_init(struct nouveau_object *);
42#define _nouveau_clock_fini _nouveau_subdev_fini 124#define _nouveau_clock_fini _nouveau_subdev_fini
43 125
44extern struct nouveau_oclass nv04_clock_oclass; 126extern struct nouveau_oclass nv04_clock_oclass;
45extern struct nouveau_oclass nv40_clock_oclass; 127extern struct nouveau_oclass nv40_clock_oclass;
46extern struct nouveau_oclass nv50_clock_oclass; 128extern struct nouveau_oclass *nv50_clock_oclass;
129extern struct nouveau_oclass *nv84_clock_oclass;
47extern struct nouveau_oclass nva3_clock_oclass; 130extern struct nouveau_oclass nva3_clock_oclass;
48extern struct nouveau_oclass nvc0_clock_oclass; 131extern struct nouveau_oclass nvc0_clock_oclass;
132extern struct nouveau_oclass nve0_clock_oclass;
49 133
50int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq); 134int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq);
51int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, 135int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
@@ -55,4 +139,9 @@ int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
55int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, 139int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
56 int clk, struct nouveau_pll_vals *); 140 int clk, struct nouveau_pll_vals *);
57 141
142int nouveau_clock_ustate(struct nouveau_clock *, int req);
143int nouveau_clock_astate(struct nouveau_clock *, int req, int rel);
144int nouveau_clock_dstate(struct nouveau_clock *, int req, int rel);
145int nouveau_clock_tstate(struct nouveau_clock *, int req, int rel);
146
58#endif 147#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 2e7405084261..8541aa382ff2 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -78,23 +78,28 @@ nouveau_fb(void *obj)
78 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB]; 78 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
79} 79}
80 80
81extern struct nouveau_oclass nv04_fb_oclass; 81extern struct nouveau_oclass *nv04_fb_oclass;
82extern struct nouveau_oclass nv10_fb_oclass; 82extern struct nouveau_oclass *nv10_fb_oclass;
83extern struct nouveau_oclass nv1a_fb_oclass; 83extern struct nouveau_oclass *nv1a_fb_oclass;
84extern struct nouveau_oclass nv20_fb_oclass; 84extern struct nouveau_oclass *nv20_fb_oclass;
85extern struct nouveau_oclass nv25_fb_oclass; 85extern struct nouveau_oclass *nv25_fb_oclass;
86extern struct nouveau_oclass nv30_fb_oclass; 86extern struct nouveau_oclass *nv30_fb_oclass;
87extern struct nouveau_oclass nv35_fb_oclass; 87extern struct nouveau_oclass *nv35_fb_oclass;
88extern struct nouveau_oclass nv36_fb_oclass; 88extern struct nouveau_oclass *nv36_fb_oclass;
89extern struct nouveau_oclass nv40_fb_oclass; 89extern struct nouveau_oclass *nv40_fb_oclass;
90extern struct nouveau_oclass nv41_fb_oclass; 90extern struct nouveau_oclass *nv41_fb_oclass;
91extern struct nouveau_oclass nv44_fb_oclass; 91extern struct nouveau_oclass *nv44_fb_oclass;
92extern struct nouveau_oclass nv46_fb_oclass; 92extern struct nouveau_oclass *nv46_fb_oclass;
93extern struct nouveau_oclass nv47_fb_oclass; 93extern struct nouveau_oclass *nv47_fb_oclass;
94extern struct nouveau_oclass nv49_fb_oclass; 94extern struct nouveau_oclass *nv49_fb_oclass;
95extern struct nouveau_oclass nv4e_fb_oclass; 95extern struct nouveau_oclass *nv4e_fb_oclass;
96extern struct nouveau_oclass nv50_fb_oclass; 96extern struct nouveau_oclass *nv50_fb_oclass;
97extern struct nouveau_oclass nvc0_fb_oclass; 97extern struct nouveau_oclass *nv84_fb_oclass;
98extern struct nouveau_oclass *nva3_fb_oclass;
99extern struct nouveau_oclass *nvaa_fb_oclass;
100extern struct nouveau_oclass *nvaf_fb_oclass;
101extern struct nouveau_oclass *nvc0_fb_oclass;
102extern struct nouveau_oclass *nve0_fb_oclass;
98 103
99struct nouveau_ram { 104struct nouveau_ram {
100 struct nouveau_object base; 105 struct nouveau_object base;
@@ -121,6 +126,17 @@ struct nouveau_ram {
121 int (*get)(struct nouveau_fb *, u64 size, u32 align, 126 int (*get)(struct nouveau_fb *, u64 size, u32 align,
122 u32 size_nc, u32 type, struct nouveau_mem **); 127 u32 size_nc, u32 type, struct nouveau_mem **);
123 void (*put)(struct nouveau_fb *, struct nouveau_mem **); 128 void (*put)(struct nouveau_fb *, struct nouveau_mem **);
129
130 int (*calc)(struct nouveau_fb *, u32 freq);
131 int (*prog)(struct nouveau_fb *);
132 void (*tidy)(struct nouveau_fb *);
133 struct {
134 u8 version;
135 u32 data;
136 u8 size;
137 } rammap, ramcfg, timing;
138 u32 freq;
139 u32 mr[16];
124}; 140};
125 141
126#endif 142#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 7e4e2775f249..9fa5da723871 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -60,13 +60,18 @@ void _nouveau_i2c_port_dtor(struct nouveau_object *);
60#define _nouveau_i2c_port_init nouveau_object_init 60#define _nouveau_i2c_port_init nouveau_object_init
61#define _nouveau_i2c_port_fini nouveau_object_fini 61#define _nouveau_i2c_port_fini nouveau_object_fini
62 62
63struct nouveau_i2c_board_info {
64 struct i2c_board_info dev;
65 u8 udelay; /* set to 0 to use the standard delay */
66};
67
63struct nouveau_i2c { 68struct nouveau_i2c {
64 struct nouveau_subdev base; 69 struct nouveau_subdev base;
65 70
66 struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index); 71 struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
67 struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type); 72 struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
68 int (*identify)(struct nouveau_i2c *, int index, 73 int (*identify)(struct nouveau_i2c *, int index,
69 const char *what, struct i2c_board_info *, 74 const char *what, struct nouveau_i2c_board_info *,
70 bool (*match)(struct nouveau_i2c_port *, 75 bool (*match)(struct nouveau_i2c_port *,
71 struct i2c_board_info *)); 76 struct i2c_board_info *));
72 struct list_head ports; 77 struct list_head ports;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index ce6569f365a7..adc88b73d911 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -11,7 +11,6 @@ struct nouveau_mc_intr {
11 11
12struct nouveau_mc { 12struct nouveau_mc {
13 struct nouveau_subdev base; 13 struct nouveau_subdev base;
14 const struct nouveau_mc_intr *intr_map;
15 bool use_msi; 14 bool use_msi;
16}; 15};
17 16
@@ -21,8 +20,8 @@ nouveau_mc(void *obj)
21 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; 20 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
22} 21}
23 22
24#define nouveau_mc_create(p,e,o,m,d) \ 23#define nouveau_mc_create(p,e,o,d) \
25 nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d) 24 nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
26#define nouveau_mc_destroy(p) ({ \ 25#define nouveau_mc_destroy(p) ({ \
27 struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ 26 struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
28}) 27})
@@ -34,20 +33,24 @@ nouveau_mc(void *obj)
34}) 33})
35 34
36int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, 35int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
37 struct nouveau_oclass *, const struct nouveau_mc_intr *, 36 struct nouveau_oclass *, int, void **);
38 int, void **);
39void _nouveau_mc_dtor(struct nouveau_object *); 37void _nouveau_mc_dtor(struct nouveau_object *);
40int _nouveau_mc_init(struct nouveau_object *); 38int _nouveau_mc_init(struct nouveau_object *);
41int _nouveau_mc_fini(struct nouveau_object *, bool); 39int _nouveau_mc_fini(struct nouveau_object *, bool);
42 40
43extern struct nouveau_oclass nv04_mc_oclass; 41struct nouveau_mc_oclass {
44extern struct nouveau_oclass nv44_mc_oclass; 42 struct nouveau_oclass base;
45extern struct nouveau_oclass nv50_mc_oclass; 43 const struct nouveau_mc_intr *intr;
46extern struct nouveau_oclass nv98_mc_oclass; 44 void (*msi_rearm)(struct nouveau_mc *);
47extern struct nouveau_oclass nvc0_mc_oclass; 45};
48 46
49extern const struct nouveau_mc_intr nv04_mc_intr[]; 47extern struct nouveau_oclass *nv04_mc_oclass;
50int nv04_mc_init(struct nouveau_object *); 48extern struct nouveau_oclass *nv40_mc_oclass;
51int nv50_mc_init(struct nouveau_object *); 49extern struct nouveau_oclass *nv44_mc_oclass;
50extern struct nouveau_oclass *nv50_mc_oclass;
51extern struct nouveau_oclass *nv94_mc_oclass;
52extern struct nouveau_oclass *nv98_mc_oclass;
53extern struct nouveau_oclass *nvc0_mc_oclass;
54extern struct nouveau_oclass *nvc3_mc_oclass;
52 55
53#endif 56#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
new file mode 100644
index 000000000000..c5c92cbed33f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
@@ -0,0 +1,80 @@
1#ifndef __NOUVEAU_PWR_H__
2#define __NOUVEAU_PWR_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_pwr {
8 struct nouveau_subdev base;
9
10 struct {
11 u32 limit;
12 u32 *data;
13 u32 size;
14 } code;
15
16 struct {
17 u32 limit;
18 u32 *data;
19 u32 size;
20 } data;
21
22 struct {
23 u32 base;
24 u32 size;
25 } send;
26
27 struct {
28 u32 base;
29 u32 size;
30
31 struct work_struct work;
32 wait_queue_head_t wait;
33 u32 process;
34 u32 message;
35 u32 data[2];
36 } recv;
37
38 int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32);
39};
40
41static inline struct nouveau_pwr *
42nouveau_pwr(void *obj)
43{
44 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_PWR];
45}
46
47#define nouveau_pwr_create(p, e, o, d) \
48 nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d)
49#define nouveau_pwr_destroy(p) \
50 nouveau_subdev_destroy(&(p)->base)
51#define nouveau_pwr_init(p) ({ \
52 struct nouveau_pwr *ppwr = (p); \
53 _nouveau_pwr_init(nv_object(ppwr)); \
54})
55#define nouveau_pwr_fini(p,s) ({ \
56 struct nouveau_pwr *ppwr = (p); \
57 _nouveau_pwr_fini(nv_object(ppwr), (s)); \
58})
59
60int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *,
61 struct nouveau_oclass *, int, void **);
62#define _nouveau_pwr_dtor _nouveau_subdev_dtor
63int _nouveau_pwr_init(struct nouveau_object *);
64int _nouveau_pwr_fini(struct nouveau_object *, bool);
65
66extern struct nouveau_oclass nva3_pwr_oclass;
67extern struct nouveau_oclass nvc0_pwr_oclass;
68extern struct nouveau_oclass nvd0_pwr_oclass;
69extern struct nouveau_oclass nv108_pwr_oclass;
70
71/* interface to MEMX process running on PPWR */
72struct nouveau_memx;
73int nouveau_memx_init(struct nouveau_pwr *, struct nouveau_memx **);
74int nouveau_memx_fini(struct nouveau_memx **, bool exec);
75void nouveau_memx_wr32(struct nouveau_memx *, u32 addr, u32 data);
76void nouveau_memx_wait(struct nouveau_memx *,
77 u32 addr, u32 mask, u32 data, u32 nsec);
78void nouveau_memx_nsec(struct nouveau_memx *, u32 nsec);
79
80#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index c075998d82e6..69891d4a3fe7 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -71,6 +71,8 @@ void _nouveau_therm_dtor(struct nouveau_object *);
71int _nouveau_therm_init(struct nouveau_object *); 71int _nouveau_therm_init(struct nouveau_object *);
72int _nouveau_therm_fini(struct nouveau_object *, bool); 72int _nouveau_therm_fini(struct nouveau_object *, bool);
73 73
74int nouveau_therm_cstate(struct nouveau_therm *, int, int);
75
74extern struct nouveau_oclass nv40_therm_oclass; 76extern struct nouveau_oclass nv40_therm_oclass;
75extern struct nouveau_oclass nv50_therm_oclass; 77extern struct nouveau_oclass nv50_therm_oclass;
76extern struct nouveau_oclass nv84_therm_oclass; 78extern struct nouveau_oclass nv84_therm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
new file mode 100644
index 000000000000..820b62ffd75b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
@@ -0,0 +1,60 @@
1#ifndef __NOUVEAU_VOLT_H__
2#define __NOUVEAU_VOLT_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_voltage {
8 u32 uv;
9 u8 id;
10};
11
12struct nouveau_volt {
13 struct nouveau_subdev base;
14
15 int (*vid_get)(struct nouveau_volt *);
16 int (*get)(struct nouveau_volt *);
17 int (*vid_set)(struct nouveau_volt *, u8 vid);
18 int (*set)(struct nouveau_volt *, u32 uv);
19 int (*set_id)(struct nouveau_volt *, u8 id, int condition);
20
21 u8 vid_mask;
22 u8 vid_nr;
23 struct {
24 u32 uv;
25 u8 vid;
26 } vid[256];
27};
28
29static inline struct nouveau_volt *
30nouveau_volt(void *obj)
31{
32 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VOLT];
33}
34
35#define nouveau_volt_create(p, e, o, d) \
36 nouveau_volt_create_((p), (e), (o), sizeof(**d), (void **)d)
37#define nouveau_volt_destroy(p) ({ \
38 struct nouveau_volt *v = (p); \
39 _nouveau_volt_dtor(nv_object(v)); \
40})
41#define nouveau_volt_init(p) ({ \
42 struct nouveau_volt *v = (p); \
43 _nouveau_volt_init(nv_object(v)); \
44})
45#define nouveau_volt_fini(p,s) \
46 nouveau_subdev_fini((p), (s))
47
48int nouveau_volt_create_(struct nouveau_object *, struct nouveau_object *,
49 struct nouveau_oclass *, int, void **);
50void _nouveau_volt_dtor(struct nouveau_object *);
51int _nouveau_volt_init(struct nouveau_object *);
52#define _nouveau_volt_fini _nouveau_subdev_fini
53
54extern struct nouveau_oclass nv40_volt_oclass;
55
56int nouveau_voltgpio_init(struct nouveau_volt *);
57int nouveau_voltgpio_get(struct nouveau_volt *);
58int nouveau_voltgpio_set(struct nouveau_volt *, u8);
59
60#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c
new file mode 100644
index 000000000000..c1835e591c44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c
@@ -0,0 +1,127 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/boost.h>
28
29u16
30nvbios_boostTe(struct nouveau_bios *bios,
31 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
32{
33 struct bit_entry bit_P;
34 u16 boost = 0x0000;
35
36 if (!bit_entry(bios, 'P', &bit_P)) {
37 if (bit_P.version == 2)
38 boost = nv_ro16(bios, bit_P.offset + 0x30);
39
40 if (boost) {
41 *ver = nv_ro08(bios, boost + 0);
42 switch (*ver) {
43 case 0x11:
44 *hdr = nv_ro08(bios, boost + 1);
45 *cnt = nv_ro08(bios, boost + 5);
46 *len = nv_ro08(bios, boost + 2);
47 *snr = nv_ro08(bios, boost + 4);
48 *ssz = nv_ro08(bios, boost + 3);
49 return boost;
50 default:
51 break;
52 }
53 }
54 }
55
56 return 0x0000;
57}
58
59u16
60nvbios_boostEe(struct nouveau_bios *bios, int idx,
61 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
62{
63 u8 snr, ssz;
64 u16 data = nvbios_boostTe(bios, ver, hdr, cnt, len, &snr, &ssz);
65 if (data && idx < *cnt) {
66 data = data + *hdr + (idx * (*len + (snr * ssz)));
67 *hdr = *len;
68 *cnt = snr;
69 *len = ssz;
70 return data;
71 }
72 return 0x0000;
73}
74
75u16
76nvbios_boostEp(struct nouveau_bios *bios, int idx,
77 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
78{
79 u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
80 memset(info, 0x00, sizeof(*info));
81 if (data) {
82 info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
83 info->min = nv_ro16(bios, data + 0x02) * 1000;
84 info->max = nv_ro16(bios, data + 0x04) * 1000;
85 }
86 return data;
87}
88
89u16
90nvbios_boostEm(struct nouveau_bios *bios, u8 pstate,
91 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
92{
93 u32 data, idx = 0;
94 while ((data = nvbios_boostEp(bios, idx++, ver, hdr, cnt, len, info))) {
95 if (info->pstate == pstate)
96 break;
97 }
98 return data;
99}
100
101u16
102nvbios_boostSe(struct nouveau_bios *bios, int idx,
103 u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len)
104{
105 if (data && idx < cnt) {
106 data = data + *hdr + (idx * len);
107 *hdr = len;
108 return data;
109 }
110 return 0x0000;
111}
112
113u16
114nvbios_boostSp(struct nouveau_bios *bios, int idx,
115 u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len,
116 struct nvbios_boostS *info)
117{
118 data = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);
119 memset(info, 0x00, sizeof(*info));
120 if (data) {
121 info->domain = nv_ro08(bios, data + 0x00);
122 info->percent = nv_ro08(bios, data + 0x01);
123 info->min = nv_ro16(bios, data + 0x02) * 1000;
124 info->max = nv_ro16(bios, data + 0x04) * 1000;
125 }
126 return data;
127}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c
new file mode 100644
index 000000000000..d3b15327fbfd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/cstep.h>
28
29u16
30nvbios_cstepTe(struct nouveau_bios *bios,
31 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
32{
33 struct bit_entry bit_P;
34 u16 cstep = 0x0000;
35
36 if (!bit_entry(bios, 'P', &bit_P)) {
37 if (bit_P.version == 2)
38 cstep = nv_ro16(bios, bit_P.offset + 0x34);
39
40 if (cstep) {
41 *ver = nv_ro08(bios, cstep + 0);
42 switch (*ver) {
43 case 0x10:
44 *hdr = nv_ro08(bios, cstep + 1);
45 *cnt = nv_ro08(bios, cstep + 3);
46 *len = nv_ro08(bios, cstep + 2);
47 *xnr = nv_ro08(bios, cstep + 5);
48 *xsz = nv_ro08(bios, cstep + 4);
49 return cstep;
50 default:
51 break;
52 }
53 }
54 }
55
56 return 0x0000;
57}
58
59u16
60nvbios_cstepEe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
61{
62 u8 cnt, len, xnr, xsz;
63 u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
64 if (data && idx < cnt) {
65 data = data + *hdr + (idx * len);
66 *hdr = len;
67 return data;
68 }
69 return 0x0000;
70}
71
72u16
73nvbios_cstepEp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
74 struct nvbios_cstepE *info)
75{
76 u16 data = nvbios_cstepEe(bios, idx, ver, hdr);
77 memset(info, 0x00, sizeof(*info));
78 if (data) {
79 info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
80 info->index = nv_ro08(bios, data + 0x03);
81 }
82 return data;
83}
84
85u16
86nvbios_cstepEm(struct nouveau_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
87 struct nvbios_cstepE *info)
88{
89 u32 data, idx = 0;
90 while ((data = nvbios_cstepEp(bios, idx++, ver, hdr, info))) {
91 if (info->pstate == pstate)
92 break;
93 }
94 return data;
95}
96
97u16
98nvbios_cstepXe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
99{
100 u8 cnt, len, xnr, xsz;
101 u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
102 if (data && idx < xnr) {
103 data = data + *hdr + (cnt * len) + (idx * xsz);
104 *hdr = xsz;
105 return data;
106 }
107 return 0x0000;
108}
109
110u16
111nvbios_cstepXp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
112 struct nvbios_cstepX *info)
113{
114 u16 data = nvbios_cstepXe(bios, idx, ver, hdr);
115 memset(info, 0x00, sizeof(*info));
116 if (data) {
117 info->freq = nv_ro16(bios, data + 0x00) * 1000;
118 info->unkn[0] = nv_ro08(bios, data + 0x02);
119 info->unkn[1] = nv_ro08(bios, data + 0x03);
120 info->voltage = nv_ro08(bios, data + 0x04);
121 }
122 return data;
123}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 663853bcca82..7628fe759220 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -89,6 +89,7 @@ nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
89 struct nvbios_dpout *info) 89 struct nvbios_dpout *info)
90{ 90{
91 u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len); 91 u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
92 memset(info, 0x00, sizeof(*info));
92 if (data && *ver) { 93 if (data && *ver) {
93 info->type = nv_ro16(bios, data + 0x00); 94 info->type = nv_ro16(bios, data + 0x00);
94 info->mask = nv_ro16(bios, data + 0x02); 95 info->mask = nv_ro16(bios, data + 0x02);
@@ -99,9 +100,12 @@ nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
99 info->script[0] = nv_ro16(bios, data + 0x06); 100 info->script[0] = nv_ro16(bios, data + 0x06);
100 info->script[1] = nv_ro16(bios, data + 0x08); 101 info->script[1] = nv_ro16(bios, data + 0x08);
101 info->lnkcmp = nv_ro16(bios, data + 0x0a); 102 info->lnkcmp = nv_ro16(bios, data + 0x0a);
102 info->script[2] = nv_ro16(bios, data + 0x0c); 103 if (*len >= 0x0f) {
103 info->script[3] = nv_ro16(bios, data + 0x0e); 104 info->script[2] = nv_ro16(bios, data + 0x0c);
104 info->script[4] = nv_ro16(bios, data + 0x10); 105 info->script[3] = nv_ro16(bios, data + 0x0e);
106 }
107 if (*len >= 0x11)
108 info->script[4] = nv_ro16(bios, data + 0x10);
105 break; 109 break;
106 case 0x40: 110 case 0x40:
107 info->flags = nv_ro08(bios, data + 0x04); 111 info->flags = nv_ro08(bios, data + 0x04);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 57cda2a1437b..420908cb82b6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2180,7 +2180,7 @@ nvbios_init(struct nouveau_subdev *subdev, bool execute)
2180 u16 data; 2180 u16 data;
2181 2181
2182 if (execute) 2182 if (execute)
2183 nv_suspend(bios, "running init tables\n"); 2183 nv_info(bios, "running init tables\n");
2184 while (!ret && (data = (init_script(bios, ++i)))) { 2184 while (!ret && (data = (init_script(bios, ++i)))) {
2185 struct nvbios_init init = { 2185 struct nvbios_init init = {
2186 .subdev = subdev, 2186 .subdev = subdev,
@@ -2210,5 +2210,5 @@ nvbios_init(struct nouveau_subdev *subdev, bool execute)
2210 ret = nvbios_exec(&init); 2210 ret = nvbios_exec(&init);
2211 } 2211 }
2212 2212
2213 return 0; 2213 return ret;
2214} 2214}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
index bcbb056c2887..675e221680aa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
@@ -26,8 +26,9 @@
26#include <subdev/bios/bit.h> 26#include <subdev/bios/bit.h>
27#include <subdev/bios/perf.h> 27#include <subdev/bios/perf.h>
28 28
29static u16 29u16
30perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) 30nvbios_perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
31 u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
31{ 32{
32 struct bit_entry bit_P; 33 struct bit_entry bit_P;
33 u16 perf = 0x0000; 34 u16 perf = 0x0000;
@@ -38,10 +39,22 @@ perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
38 if (perf) { 39 if (perf) {
39 *ver = nv_ro08(bios, perf + 0); 40 *ver = nv_ro08(bios, perf + 0);
40 *hdr = nv_ro08(bios, perf + 1); 41 *hdr = nv_ro08(bios, perf + 1);
42 if (*ver >= 0x40 && *ver < 0x41) {
43 *cnt = nv_ro08(bios, perf + 5);
44 *len = nv_ro08(bios, perf + 2);
45 *snr = nv_ro08(bios, perf + 4);
46 *ssz = nv_ro08(bios, perf + 3);
47 return perf;
48 } else
49 if (*ver >= 0x20 && *ver < 0x40) {
50 *cnt = nv_ro08(bios, perf + 2);
51 *len = nv_ro08(bios, perf + 3);
52 *snr = nv_ro08(bios, perf + 4);
53 *ssz = nv_ro08(bios, perf + 5);
54 return perf;
55 }
41 } 56 }
42 } else 57 }
43 nv_error(bios, "unknown offset for perf in BIT P %d\n",
44 bit_P.version);
45 } 58 }
46 59
47 if (bios->bmp_offset) { 60 if (bios->bmp_offset) {
@@ -50,19 +63,132 @@ perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
50 if (perf) { 63 if (perf) {
51 *hdr = nv_ro08(bios, perf + 0); 64 *hdr = nv_ro08(bios, perf + 0);
52 *ver = nv_ro08(bios, perf + 1); 65 *ver = nv_ro08(bios, perf + 1);
66 *cnt = nv_ro08(bios, perf + 2);
67 *len = nv_ro08(bios, perf + 3);
68 *snr = 0;
69 *ssz = 0;
70 return perf;
53 } 71 }
54 } 72 }
55 } 73 }
56 74
75 return 0x0000;
76}
77
78u16
79nvbios_perf_entry(struct nouveau_bios *bios, int idx,
80 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
81{
82 u8 snr, ssz;
83 u16 perf = nvbios_perf_table(bios, ver, hdr, cnt, len, &snr, &ssz);
84 if (perf && idx < *cnt) {
85 perf = perf + *hdr + (idx * (*len + (snr * ssz)));
86 *hdr = *len;
87 *cnt = snr;
88 *len = ssz;
89 return perf;
90 }
91 return 0x0000;
92}
93
94u16
95nvbios_perfEp(struct nouveau_bios *bios, int idx,
96 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
97 struct nvbios_perfE *info)
98{
99 u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
100 memset(info, 0x00, sizeof(*info));
101 info->pstate = nv_ro08(bios, perf + 0x00);
102 switch (!!perf * *ver) {
103 case 0x12:
104 case 0x13:
105 case 0x14:
106 info->core = nv_ro32(bios, perf + 0x01) * 10;
107 info->memory = nv_ro32(bios, perf + 0x05) * 20;
108 info->fanspeed = nv_ro08(bios, perf + 0x37);
109 if (*hdr > 0x38)
110 info->voltage = nv_ro08(bios, perf + 0x38);
111 break;
112 case 0x21:
113 case 0x23:
114 case 0x24:
115 info->fanspeed = nv_ro08(bios, perf + 0x04);
116 info->voltage = nv_ro08(bios, perf + 0x05);
117 info->shader = nv_ro16(bios, perf + 0x06) * 1000;
118 info->core = info->shader + (signed char)
119 nv_ro08(bios, perf + 0x08) * 1000;
120 switch (nv_device(bios)->chipset) {
121 case 0x49:
122 case 0x4b:
123 info->memory = nv_ro16(bios, perf + 0x0b) * 1000;
124 break;
125 default:
126 info->memory = nv_ro16(bios, perf + 0x0b) * 2000;
127 break;
128 }
129 break;
130 case 0x25:
131 info->fanspeed = nv_ro08(bios, perf + 0x04);
132 info->voltage = nv_ro08(bios, perf + 0x05);
133 info->core = nv_ro16(bios, perf + 0x06) * 1000;
134 info->shader = nv_ro16(bios, perf + 0x0a) * 1000;
135 info->memory = nv_ro16(bios, perf + 0x0c) * 1000;
136 break;
137 case 0x30:
138 info->script = nv_ro16(bios, perf + 0x02);
139 case 0x35:
140 info->fanspeed = nv_ro08(bios, perf + 0x06);
141 info->voltage = nv_ro08(bios, perf + 0x07);
142 info->core = nv_ro16(bios, perf + 0x08) * 1000;
143 info->shader = nv_ro16(bios, perf + 0x0a) * 1000;
144 info->memory = nv_ro16(bios, perf + 0x0c) * 1000;
145 info->vdec = nv_ro16(bios, perf + 0x10) * 1000;
146 info->disp = nv_ro16(bios, perf + 0x14) * 1000;
147 break;
148 case 0x40:
149 info->voltage = nv_ro08(bios, perf + 0x02);
150 break;
151 default:
152 return 0x0000;
153 }
57 return perf; 154 return perf;
58} 155}
59 156
157u32
158nvbios_perfSe(struct nouveau_bios *bios, u32 perfE, int idx,
159 u8 *ver, u8 *hdr, u8 cnt, u8 len)
160{
161 u32 data = 0x00000000;
162 if (idx < cnt) {
163 data = perfE + *hdr + (idx * len);
164 *hdr = len;
165 }
166 return data;
167}
168
169u32
170nvbios_perfSp(struct nouveau_bios *bios, u32 perfE, int idx,
171 u8 *ver, u8 *hdr, u8 cnt, u8 len,
172 struct nvbios_perfS *info)
173{
174 u32 data = nvbios_perfSe(bios, perfE, idx, ver, hdr, cnt, len);
175 memset(info, 0x00, sizeof(*info));
176 switch (!!data * *ver) {
177 case 0x40:
178 info->v40.freq = (nv_ro16(bios, data + 0x00) & 0x3fff) * 1000;
179 break;
180 default:
181 break;
182 }
183 return data;
184}
185
60int 186int
61nvbios_perf_fan_parse(struct nouveau_bios *bios, 187nvbios_perf_fan_parse(struct nouveau_bios *bios,
62 struct nvbios_perf_fan *fan) 188 struct nvbios_perf_fan *fan)
63{ 189{
64 u8 ver = 0, hdr = 0, cnt = 0, len = 0; 190 u8 ver, hdr, cnt, len, snr, ssz;
65 u16 perf = perf_table(bios, &ver, &hdr, &cnt, &len); 191 u16 perf = nvbios_perf_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
66 if (!perf) 192 if (!perf)
67 return -ENODEV; 193 return -ENODEV;
68 194
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
index f835501203e5..1f76de597d4b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
@@ -114,6 +114,7 @@ pll_map(struct nouveau_bios *bios)
114 switch (nv_device(bios)->card_type) { 114 switch (nv_device(bios)->card_type) {
115 case NV_04: 115 case NV_04:
116 case NV_10: 116 case NV_10:
117 case NV_11:
117 case NV_20: 118 case NV_20:
118 case NV_30: 119 case NV_30:
119 return nv04_pll_mapping; 120 return nv04_pll_mapping;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
new file mode 100644
index 000000000000..916fa9d302b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
@@ -0,0 +1,88 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/rammap.h>
28
/* Locate the RAMMAP table via the BIT 'P' (v2) entry (pointer at +4).
 *
 * On success returns the table offset and fills in:
 *   *ver - table version byte (1.0 or 1.1 supported)
 *   *hdr - header size        (byte +1)
 *   *cnt - entry count        (byte +5)
 *   *len - entry header size  (byte +2)
 *   *snr - sub-entries per entry (byte +4)
 *   *ssz - sub-entry size     (byte +3)
 * Returns 0x0000 when the table is absent or its version is unknown
 * (output parameters other than *ver are then left unwritten). */
u16
nvbios_rammap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
		    u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
	struct bit_entry bit_P;
	u16 rammap = 0x0000;

	if (!bit_entry(bios, 'P', &bit_P)) {
		if (bit_P.version == 2)
			rammap = nv_ro16(bios, bit_P.offset + 4);

		if (rammap) {
			*ver = nv_ro08(bios, rammap + 0);
			switch (*ver) {
			case 0x10:
			case 0x11:
				*hdr = nv_ro08(bios, rammap + 1);
				*cnt = nv_ro08(bios, rammap + 5);
				*len = nv_ro08(bios, rammap + 2);
				*snr = nv_ro08(bios, rammap + 4);
				*ssz = nv_ro08(bios, rammap + 3);
				return rammap;
			default:
				break;
			}
		}
	}

	return 0x0000;
}
59
60u16
61nvbios_rammap_entry(struct nouveau_bios *bios, int idx,
62 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
63{
64 u8 snr, ssz;
65 u16 rammap = nvbios_rammap_table(bios, ver, hdr, cnt, len, &snr, &ssz);
66 if (rammap && idx < *cnt) {
67 rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
68 *hdr = *len;
69 *cnt = snr;
70 *len = ssz;
71 return rammap;
72 }
73 return 0x0000;
74}
75
/* Find the first RAMMAP entry whose frequency range contains 'khz';
 * entry bytes +0x00/+0x02 hold the range's min/max (16-bit values).
 * Returns the entry offset (with *hdr/*cnt/*len describing its
 * sub-entries, per nvbios_rammap_entry), or 0 when nothing matches.
 * NOTE(review): 'khz' is u16, so frequencies above 65535 units cannot
 * be matched — confirm the unit/scale used by callers. */
u16
nvbios_rammap_match(struct nouveau_bios *bios, u16 khz,
		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	int idx = 0;
	u32 data;
	while ((data = nvbios_rammap_entry(bios, idx++, ver, hdr, cnt, len))) {
		if (khz >= nv_ro16(bios, data + 0x00) &&
		    khz <= nv_ro16(bios, data + 0x02))
			break;
	}
	return data;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
new file mode 100644
index 000000000000..151c2d6aaee8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
@@ -0,0 +1,73 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/timing.h>
28
/* Locate the memory timing table via the BIT 'P' entry.
 * BIT P v1 keeps the pointer at +4, v2 at +8.
 * Fills *ver/*hdr/*cnt/*len on success.  Note that v1.0 and v2.0
 * headers store the entry-count and entry-size bytes in swapped
 * positions (+2/+3 vs +3/+2).
 * Returns the table offset, or 0x0000 when absent/unsupported. */
u16
nvbios_timing_table(struct nouveau_bios *bios,
		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	struct bit_entry bit_P;
	u16 timing = 0x0000;

	if (!bit_entry(bios, 'P', &bit_P)) {
		if (bit_P.version == 1)
			timing = nv_ro16(bios, bit_P.offset + 4);
		else
		if (bit_P.version == 2)
			timing = nv_ro16(bios, bit_P.offset + 8);

		if (timing) {
			*ver = nv_ro08(bios, timing + 0);
			switch (*ver) {
			case 0x10:
				*hdr = nv_ro08(bios, timing + 1);
				*cnt = nv_ro08(bios, timing + 2);
				*len = nv_ro08(bios, timing + 3);
				return timing;
			case 0x20:
				*hdr = nv_ro08(bios, timing + 1);
				*cnt = nv_ro08(bios, timing + 3);
				*len = nv_ro08(bios, timing + 2);
				return timing;
			default:
				break;
			}
		}
	}

	return 0x0000;
}
64
65u16
66nvbios_timing_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
67{
68 u8 hdr, cnt;
69 u16 timing = nvbios_timing_table(bios, ver, &hdr, &cnt, len);
70 if (timing && idx < cnt)
71 return timing + hdr + (idx * *len);
72 return 0x0000;
73}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c
new file mode 100644
index 000000000000..f343a1b060e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c
@@ -0,0 +1,112 @@
1/*
2 * Copyright 2012 Nouveau Community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/vmap.h>
28
/* Locate the voltage-map table via the BIT 'P' (v2) entry (pointer at
 * +0x20).  Fills *ver/*hdr/*cnt/*len for versions 1.0/2.0 (count and
 * entry size are at header bytes +3 and +2 respectively).
 * Returns 0x0000 when the table is absent or its version is unknown. */
u16
nvbios_vmap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	struct bit_entry bit_P;
	u16 vmap = 0x0000;

	if (!bit_entry(bios, 'P', &bit_P)) {
		if (bit_P.version == 2) {
			vmap = nv_ro16(bios, bit_P.offset + 0x20);
			if (vmap) {
				*ver = nv_ro08(bios, vmap + 0);
				switch (*ver) {
				case 0x10:
				case 0x20:
					*hdr = nv_ro08(bios, vmap + 1);
					*cnt = nv_ro08(bios, vmap + 3);
					*len = nv_ro08(bios, vmap + 2);
					return vmap;
				default:
					break;
				}
			}
		}
	}

	return 0x0000;
}
56
/* Look up the voltage-map table and zero-initialise 'info'.
 * No header fields are decoded for the known versions (1.0/2.0) yet;
 * the switch is a placeholder.  The '!!vmap * *ver' expression folds
 * "table missing" into version 0 so no case matches.
 * Returns the table offset, or 0 when absent. */
u16
nvbios_vmap_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
		  struct nvbios_vmap *info)
{
	u16 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);
	memset(info, 0x00, sizeof(*info));
	switch (!!vmap * *ver) {
	case 0x10:
	case 0x20:
		/* nothing parsed from the header yet */
		break;
	}
	return vmap;
}
70
71u16
72nvbios_vmap_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
73{
74 u8 hdr, cnt;
75 u16 vmap = nvbios_vmap_table(bios, ver, &hdr, &cnt, len);
76 if (vmap && idx < cnt) {
77 vmap = vmap + hdr + (idx * *len);
78 return vmap;
79 }
80 return 0x0000;
81}
82
/* Parse voltage-map entry 'idx' into 'info' (zeroed first).
 * v1.0 entries: 32-bit min/max at +0x00/+0x04 and three 32-bit args;
 *   'link' is forced to 0xff (presumably "no link" — confirm against
 *   consumers of nvbios_vmap_entry.link).
 * v2.0 entries: unk0/link bytes at +0x00/+0x01, min/max at +0x02/+0x06,
 *   six 32-bit args.
 * The '!!vmap * *ver' expression folds "no entry" into version 0 so
 * neither case matches and 'info' stays zeroed.
 * Returns the entry offset, or 0 when absent. */
u16
nvbios_vmap_entry_parse(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
			struct nvbios_vmap_entry *info)
{
	u16 vmap = nvbios_vmap_entry(bios, idx, ver, len);
	memset(info, 0x00, sizeof(*info));
	switch (!!vmap * *ver) {
	case 0x10:
		info->link = 0xff;
		info->min = nv_ro32(bios, vmap + 0x00);
		info->max = nv_ro32(bios, vmap + 0x04);
		info->arg[0] = nv_ro32(bios, vmap + 0x08);
		info->arg[1] = nv_ro32(bios, vmap + 0x0c);
		info->arg[2] = nv_ro32(bios, vmap + 0x10);
		break;
	case 0x20:
		info->unk0 = nv_ro08(bios, vmap + 0x00);
		info->link = nv_ro08(bios, vmap + 0x01);
		info->min = nv_ro32(bios, vmap + 0x02);
		info->max = nv_ro32(bios, vmap + 0x06);
		info->arg[0] = nv_ro32(bios, vmap + 0x0a);
		info->arg[1] = nv_ro32(bios, vmap + 0x0e);
		info->arg[2] = nv_ro32(bios, vmap + 0x12);
		info->arg[3] = nv_ro32(bios, vmap + 0x16);
		info->arg[4] = nv_ro32(bios, vmap + 0x1a);
		info->arg[5] = nv_ro32(bios, vmap + 0x1e);
		break;
	}
	return vmap;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c
new file mode 100644
index 000000000000..bb590de4ecb2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c
@@ -0,0 +1,137 @@
1/*
2 * Copyright 2012 Nouveau Community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/volt.h>
28
/* Locate the voltage (VID) table via the BIT 'P' entry.
 * BIT P v2 keeps the pointer at +0x0c, v1 at +0x10.
 * Header layouts differ per table version:
 *   1.2           - fixed 5-byte header; count at +2, entry size at +1
 *   2.0           - header/count/entry-size bytes at +1/+2/+3
 *   3.0, 4.0, 5.0 - header at +1, count at +3, entry size at +2
 * Returns the table offset with *ver/*hdr/*cnt/*len filled, or 0x0000
 * when absent/unsupported. */
u16
nvbios_volt_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	struct bit_entry bit_P;
	u16 volt = 0x0000;

	if (!bit_entry(bios, 'P', &bit_P)) {
		if (bit_P.version == 2)
			volt = nv_ro16(bios, bit_P.offset + 0x0c);
		else
		if (bit_P.version == 1)
			volt = nv_ro16(bios, bit_P.offset + 0x10);

		if (volt) {
			*ver = nv_ro08(bios, volt + 0);
			switch (*ver) {
			case 0x12:
				*hdr = 5;
				*cnt = nv_ro08(bios, volt + 2);
				*len = nv_ro08(bios, volt + 1);
				return volt;
			case 0x20:
				*hdr = nv_ro08(bios, volt + 1);
				*cnt = nv_ro08(bios, volt + 2);
				*len = nv_ro08(bios, volt + 3);
				return volt;
			case 0x30:
			case 0x40:
			case 0x50:
				*hdr = nv_ro08(bios, volt + 1);
				*cnt = nv_ro08(bios, volt + 3);
				*len = nv_ro08(bios, volt + 2);
				return volt;
			}
		}
	}

	return 0x0000;
}
68
/* Parse the voltage table header into 'info' (zeroed first).
 * The '!!volt * *ver' expression folds "table missing" into version 0
 * so no case matches and 'info' stays zeroed.
 * v1.2/v2.0/v3.0 expose only the VID mask byte (at differing offsets).
 * v4.0 adds base/step; its min/max are placeholders (see XXX below).
 * v5.0 carries explicit min/max/base/step fields.
 * Returns the table offset, or 0 when absent. */
u16
nvbios_volt_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
		  struct nvbios_volt *info)
{
	u16 volt = nvbios_volt_table(bios, ver, hdr, cnt, len);
	memset(info, 0x00, sizeof(*info));
	switch (!!volt * *ver) {
	case 0x12:
		info->vidmask = nv_ro08(bios, volt + 0x04);
		break;
	case 0x20:
		info->vidmask = nv_ro08(bios, volt + 0x05);
		break;
	case 0x30:
		info->vidmask = nv_ro08(bios, volt + 0x04);
		break;
	case 0x40:
		info->base = nv_ro32(bios, volt + 0x04);
		info->step = nv_ro16(bios, volt + 0x08);
		info->vidmask = nv_ro08(bios, volt + 0x0b);
		/*XXX*/
		info->min = 0;
		info->max = info->base;
		break;
	case 0x50:
		info->vidmask = nv_ro08(bios, volt + 0x06);
		info->min = nv_ro32(bios, volt + 0x0a);
		info->max = nv_ro32(bios, volt + 0x0e);
		info->base = nv_ro32(bios, volt + 0x12) & 0x00ffffff;
		info->step = nv_ro16(bios, volt + 0x16);
		break;
	}
	return volt;
}
103
104u16
105nvbios_volt_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
106{
107 u8 hdr, cnt;
108 u16 volt = nvbios_volt_table(bios, ver, &hdr, &cnt, len);
109 if (volt && idx < cnt) {
110 volt = volt + hdr + (idx * *len);
111 return volt;
112 }
113 return 0x0000;
114}
115
/* Parse voltage-table entry 'idx' into 'info' (zeroed first).
 * v1.2/v2.0: raw voltage byte at +0x00 scaled by 10000 (presumably
 *   10mV units -> uV — confirm), VID byte at +0x01.
 * v3.0: same voltage encoding; VID is the top 6 bits of byte +0x01.
 * v4.0/v5.0: per-entry data is not decoded here (voltage comes from
 *   the header's base/step instead — see nvbios_volt_parse()).
 * Returns the entry offset, or 0 when absent. */
u16
nvbios_volt_entry_parse(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
			struct nvbios_volt_entry *info)
{
	u16 volt = nvbios_volt_entry(bios, idx, ver, len);
	memset(info, 0x00, sizeof(*info));
	switch (!!volt * *ver) {
	case 0x12:
	case 0x20:
		info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
		info->vid = nv_ro08(bios, volt + 0x01);
		break;
	case 0x30:
		info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
		info->vid = nv_ro08(bios, volt + 0x01) >> 2;
		break;
	case 0x40:
	case 0x50:
		break;
	}
	return volt;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c
new file mode 100644
index 000000000000..f757470e2284
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c
@@ -0,0 +1,145 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <subdev/timer.h>
26#include <subdev/bus.h>
27
/* Accumulates a hardware sequencer (HWSQ) program before upload.
 * 'addr'/'data' shadow the last register address/value encoded so that
 * nouveau_hwsq_wr32() can use the short 16-bit opcode forms when only
 * the low half changes.  'c' is the encoded opcode byte stream. */
struct nouveau_hwsq {
	struct nouveau_bus *pbus;
	u32 addr;
	u32 data;
	struct {
		u8 data[512];
		u8 size;	/* bytes used in 'data' */
	} c;
};
37
38static void
39hwsq_cmd(struct nouveau_hwsq *hwsq, int size, u8 data[])
40{
41 memcpy(&hwsq->c.data[hwsq->c.size], data, size * sizeof(data[0]));
42 hwsq->c.size += size;
43}
44
45int
46nouveau_hwsq_init(struct nouveau_bus *pbus, struct nouveau_hwsq **phwsq)
47{
48 struct nouveau_hwsq *hwsq;
49
50 hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
51 if (hwsq) {
52 hwsq->pbus = pbus;
53 hwsq->addr = ~0;
54 hwsq->data = ~0;
55 memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data));
56 hwsq->c.size = 0;
57 }
58
59 return hwsq ? 0 : -ENOMEM;
60}
61
/* Finalise a HWSQ program: optionally upload+execute it via the bus's
 * hwsq_exec hook, then free the builder and NULL the caller's pointer.
 * Safe to call with *phwsq == NULL (returns 0).
 * Returns 0, the hwsq_exec error, or -ENOSPC when the program exceeds
 * the engine's ucode buffer. */
int
nouveau_hwsq_fini(struct nouveau_hwsq **phwsq, bool exec)
{
	struct nouveau_hwsq *hwsq = *phwsq;
	int ret = 0, i;
	if (hwsq) {
		struct nouveau_bus *pbus = hwsq->pbus;
		/* convert byte count to 32-bit words; the +4 (rather than
		 * +3) rounding appears to reserve one extra word — TODO
		 * confirm intent */
		hwsq->c.size = (hwsq->c.size + 4) / 4;
		if (hwsq->c.size <= pbus->hwsq_size) {
			if (exec)
				ret = pbus->hwsq_exec(pbus, (u32 *)hwsq->c.data,
						      hwsq->c.size);
			if (ret)
				nv_error(pbus, "hwsq exec failed: %d\n", ret);
		} else {
			nv_error(pbus, "hwsq ucode too large\n");
			ret = -ENOSPC;
		}

		/* on any failure, dump the encoded program for debugging */
		for (i = 0; ret && i < hwsq->c.size; i++)
			nv_error(pbus, "\t0x%08x\n", ((u32 *)hwsq->c.data)[i]);

		*phwsq = NULL;
		kfree(hwsq);
	}
	return ret;
}
89
/* Encode a register write into the HWSQ program.
 * Data is encoded only when it differs from the last encoded value;
 * both data and address use a long 32-bit opcode (0xe2/0xe0) when the
 * upper 16 bits change, and a short 16-bit form (0x42/0x40) otherwise.
 * The shadowed addr/data are updated so following writes can keep
 * using the short forms. */
void
nouveau_hwsq_wr32(struct nouveau_hwsq *hwsq, u32 addr, u32 data)
{
	nv_debug(hwsq->pbus, "R[%06x] = 0x%08x\n", addr, data);

	if (hwsq->data != data) {
		if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
			hwsq_cmd(hwsq, 5, (u8[]){ 0xe2, data, data >> 8,
						  data >> 16, data >> 24 });
		} else {
			hwsq_cmd(hwsq, 3, (u8[]){ 0x42, data, data >> 8 });
		}
	}

	if ((addr & 0xffff0000) != (hwsq->addr & 0xffff0000)) {
		hwsq_cmd(hwsq, 5, (u8[]){ 0xe0, addr, addr >> 8,
					  addr >> 16, addr >> 24 });
	} else {
		hwsq_cmd(hwsq, 3, (u8[]){ 0x40, addr, addr >> 8 });
	}

	hwsq->addr = addr;
	hwsq->data = data;
}
114
/* Encode a flag-modify opcode.
 * The opcode byte is 0x80 + flag, plus 0x20 once for data >= 0 and
 * again for data >= 1 (so data < 0, == 0 and >= 1 select three
 * different opcodes).  NOTE(review): the semantics of each variant are
 * inferred from the encoding only — confirm against HWSQ ISA docs. */
void
nouveau_hwsq_setf(struct nouveau_hwsq *hwsq, u8 flag, int data)
{
	nv_debug(hwsq->pbus, " FLAG[%02x] = %d\n", flag, data);
	flag += 0x80;
	if (data >= 0)
		flag += 0x20;
	if (data >= 1)
		flag += 0x20;
	hwsq_cmd(hwsq, 1, (u8[]){ flag });
}
126
/* Encode a wait opcode (0x5f): stall until flag 'flag' equals 'data'. */
void
nouveau_hwsq_wait(struct nouveau_hwsq *hwsq, u8 flag, u8 data)
{
	nv_debug(hwsq->pbus, " WAIT[%02x] = %d\n", flag, data);
	hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
}
133
134void
135nouveau_hwsq_nsec(struct nouveau_hwsq *hwsq, u32 nsec)
136{
137 u8 shift = 0, usec = nsec / 1000;
138 while (usec & ~3) {
139 usec >>= 2;
140 shift++;
141 }
142
143 nv_debug(hwsq->pbus, " DELAY = %d ns\n", nsec);
144 hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec });
145}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h
new file mode 100644
index 000000000000..12176f9c1bc6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h
@@ -0,0 +1,113 @@
1#ifndef __NVKM_BUS_HWSQ_H__
2#define __NVKM_BUS_HWSQ_H__
3
4#include <subdev/bus.h>
5
/* Convenience wrapper pairing a subdev with an in-flight HWSQ program.
 * 'sequence' is bumped on each hwsq_init() so hwsq_reg shadow values
 * cached during an earlier build are re-read from hardware. */
struct hwsq {
	struct nouveau_subdev *subdev;
	struct nouveau_hwsq *hwsq;
	int sequence;
};
11
/* Shadow state for one register (or a mirrored pair) tracked across a
 * HWSQ build.  'sequence' tags which build the cached 'data' belongs
 * to; 'force' makes the next hwsq_mask() emit a write even when the
 * value is unchanged. */
struct hwsq_reg {
	int sequence;
	bool force;
	u32 addr[2];	/* primary and (optional) mirror address */
	u32 data;	/* cached value; 0xdeadbeef until first access */
};
18
19static inline struct hwsq_reg
20hwsq_reg2(u32 addr1, u32 addr2)
21{
22 return (struct hwsq_reg) {
23 .sequence = 0,
24 .force = 0,
25 .addr = { addr1, addr2 },
26 .data = 0xdeadbeef,
27 };
28}
29
/* Describe a single (non-mirrored) register: both addresses identical. */
static inline struct hwsq_reg
hwsq_reg(u32 addr)
{
	return hwsq_reg2(addr, addr);
}
35
/* Begin building a HWSQ program for 'subdev'.
 * Bumps the sequence counter so stale hwsq_reg shadow values are
 * invalidated.  Returns 0 or the nouveau_hwsq_init() error. */
static inline int
hwsq_init(struct hwsq *ram, struct nouveau_subdev *subdev)
{
	struct nouveau_bus *pbus = nouveau_bus(subdev);
	int ret;

	ret = nouveau_hwsq_init(pbus, &ram->hwsq);
	if (ret)
		return ret;

	ram->sequence++;
	ram->subdev = subdev;
	return 0;
}
50
51static inline int
52hwsq_exec(struct hwsq *ram, bool exec)
53{
54 int ret = 0;
55 if (ram->subdev) {
56 ret = nouveau_hwsq_fini(&ram->hwsq, exec);
57 ram->subdev = NULL;
58 }
59 return ret;
60}
61
/* Read a register's value, using the cached shadow when it was already
 * accessed during this build sequence.
 * NOTE(review): the sequence tag is not updated on a fresh read, so
 * repeated reads without an intervening hwsq_wr32() hit the hardware
 * each time — confirm this is intended. */
static inline u32
hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
{
	if (reg->sequence != ram->sequence)
		reg->data = nv_rd32(ram->subdev, reg->addr[0]);
	return reg->data;
}
69
/* Record a register write in the HWSQ program and update the shadow.
 * For mirrored register pairs the secondary address is written first. */
static inline void
hwsq_wr32(struct hwsq *ram, struct hwsq_reg *reg, u32 data)
{
	reg->sequence = ram->sequence;
	reg->data = data;
	if (reg->addr[0] != reg->addr[1])
		nouveau_hwsq_wr32(ram->hwsq, reg->addr[1], reg->data);
	nouveau_hwsq_wr32(ram->hwsq, reg->addr[0], reg->data);
}
79
/* Force the next hwsq_mask() on 'reg' to emit a write even when the
 * value appears unchanged.
 * NOTE(review): the flag is never cleared, so all subsequent
 * hwsq_mask() calls on this reg also force writes — confirm intended. */
static inline void
hwsq_nuke(struct hwsq *ram, struct hwsq_reg *reg)
{
	reg->force = true;
}
85
/* Read-modify-write helper: clears 'mask' bits, ORs in 'data', and
 * records a write only when the value changes (or the reg was nuked).
 * Returns the value prior to modification. */
static inline u32
hwsq_mask(struct hwsq *ram, struct hwsq_reg *reg, u32 mask, u32 data)
{
	u32 temp = hwsq_rd32(ram, reg);
	if (temp != ((temp & ~mask) | data) || reg->force)
		hwsq_wr32(ram, reg, (temp & ~mask) | data);
	return temp;
}
94
/* Encode a flag modification (see nouveau_hwsq_setf for the opcode). */
static inline void
hwsq_setf(struct hwsq *ram, u8 flag, int data)
{
	nouveau_hwsq_setf(ram->hwsq, flag, data);
}

/* Encode a wait until flag 'flag' equals 'data'. */
static inline void
hwsq_wait(struct hwsq *ram, u8 flag, u8 data)
{
	nouveau_hwsq_wait(ram->hwsq, flag, data);
}

/* Encode a delay of (approximately) 'nsec' nanoseconds. */
static inline void
hwsq_nsec(struct hwsq *ram, u32 nsec)
{
	nouveau_hwsq_nsec(ram->hwsq, nsec);
}
112
113#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
index 8c7f8057a185..23921b5351db 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
@@ -23,11 +23,7 @@
23 * Ben Skeggs 23 * Ben Skeggs
24 */ 24 */
25 25
26#include <subdev/bus.h> 26#include "nv04.h"
27
28struct nv04_bus_priv {
29 struct nouveau_bus base;
30};
31 27
32static void 28static void
33nv04_bus_intr(struct nouveau_subdev *subdev) 29nv04_bus_intr(struct nouveau_subdev *subdev)
@@ -56,10 +52,22 @@ nv04_bus_intr(struct nouveau_subdev *subdev)
56} 52}
57 53
58static int 54static int
55nv04_bus_init(struct nouveau_object *object)
56{
57 struct nv04_bus_priv *priv = (void *)object;
58
59 nv_wr32(priv, 0x001100, 0xffffffff);
60 nv_wr32(priv, 0x001140, 0x00000111);
61
62 return nouveau_bus_init(&priv->base);
63}
64
65int
59nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 66nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
60 struct nouveau_oclass *oclass, void *data, u32 size, 67 struct nouveau_oclass *oclass, void *data, u32 size,
61 struct nouveau_object **pobject) 68 struct nouveau_object **pobject)
62{ 69{
70 struct nv04_bus_impl *impl = (void *)oclass;
63 struct nv04_bus_priv *priv; 71 struct nv04_bus_priv *priv;
64 int ret; 72 int ret;
65 73
@@ -68,28 +76,20 @@ nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
68 if (ret) 76 if (ret)
69 return ret; 77 return ret;
70 78
71 nv_subdev(priv)->intr = nv04_bus_intr; 79 nv_subdev(priv)->intr = impl->intr;
80 priv->base.hwsq_exec = impl->hwsq_exec;
81 priv->base.hwsq_size = impl->hwsq_size;
72 return 0; 82 return 0;
73} 83}
74 84
75static int 85struct nouveau_oclass *
76nv04_bus_init(struct nouveau_object *object) 86nv04_bus_oclass = &(struct nv04_bus_impl) {
77{ 87 .base.handle = NV_SUBDEV(BUS, 0x04),
78 struct nv04_bus_priv *priv = (void *)object; 88 .base.ofuncs = &(struct nouveau_ofuncs) {
79
80 nv_wr32(priv, 0x001100, 0xffffffff);
81 nv_wr32(priv, 0x001140, 0x00000111);
82
83 return nouveau_bus_init(&priv->base);
84}
85
86struct nouveau_oclass
87nv04_bus_oclass = {
88 .handle = NV_SUBDEV(BUS, 0x04),
89 .ofuncs = &(struct nouveau_ofuncs) {
90 .ctor = nv04_bus_ctor, 89 .ctor = nv04_bus_ctor,
91 .dtor = _nouveau_bus_dtor, 90 .dtor = _nouveau_bus_dtor,
92 .init = nv04_bus_init, 91 .init = nv04_bus_init,
93 .fini = _nouveau_bus_fini, 92 .fini = _nouveau_bus_fini,
94 }, 93 },
95}; 94 .intr = nv04_bus_intr,
95}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h
new file mode 100644
index 000000000000..4d7602450a20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h
@@ -0,0 +1,23 @@
1#ifndef __NVKM_BUS_NV04_H__
2#define __NVKM_BUS_NV04_H__
3
4#include <subdev/bus.h>
5
6struct nv04_bus_priv {
7 struct nouveau_bus base;
8};
9
10int nv04_bus_ctor(struct nouveau_object *, struct nouveau_object *,
11 struct nouveau_oclass *, void *, u32,
12 struct nouveau_object **);
13int nv50_bus_init(struct nouveau_object *);
14void nv50_bus_intr(struct nouveau_subdev *);
15
16struct nv04_bus_impl {
17 struct nouveau_oclass base;
18 void (*intr)(struct nouveau_subdev *);
19 int (*hwsq_exec)(struct nouveau_bus *, u32 *, u32);
20 u32 hwsq_size;
21};
22
23#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
index 34132aef34e1..94da46f61627 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
@@ -23,11 +23,7 @@
23 * Ben Skeggs 23 * Ben Skeggs
24 */ 24 */
25 25
26#include <subdev/bus.h> 26#include "nv04.h"
27
28struct nv31_bus_priv {
29 struct nouveau_bus base;
30};
31 27
32static void 28static void
33nv31_bus_intr(struct nouveau_subdev *subdev) 29nv31_bus_intr(struct nouveau_subdev *subdev)
@@ -71,7 +67,7 @@ nv31_bus_intr(struct nouveau_subdev *subdev)
71static int 67static int
72nv31_bus_init(struct nouveau_object *object) 68nv31_bus_init(struct nouveau_object *object)
73{ 69{
74 struct nv31_bus_priv *priv = (void *)object; 70 struct nv04_bus_priv *priv = (void *)object;
75 int ret; 71 int ret;
76 72
77 ret = nouveau_bus_init(&priv->base); 73 ret = nouveau_bus_init(&priv->base);
@@ -83,30 +79,14 @@ nv31_bus_init(struct nouveau_object *object)
83 return 0; 79 return 0;
84} 80}
85 81
86static int 82struct nouveau_oclass *
87nv31_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 83nv31_bus_oclass = &(struct nv04_bus_impl) {
88 struct nouveau_oclass *oclass, void *data, u32 size, 84 .base.handle = NV_SUBDEV(BUS, 0x31),
89 struct nouveau_object **pobject) 85 .base.ofuncs = &(struct nouveau_ofuncs) {
90{ 86 .ctor = nv04_bus_ctor,
91 struct nv31_bus_priv *priv;
92 int ret;
93
94 ret = nouveau_bus_create(parent, engine, oclass, &priv);
95 *pobject = nv_object(priv);
96 if (ret)
97 return ret;
98
99 nv_subdev(priv)->intr = nv31_bus_intr;
100 return 0;
101}
102
103struct nouveau_oclass
104nv31_bus_oclass = {
105 .handle = NV_SUBDEV(BUS, 0x31),
106 .ofuncs = &(struct nouveau_ofuncs) {
107 .ctor = nv31_bus_ctor,
108 .dtor = _nouveau_bus_dtor, 87 .dtor = _nouveau_bus_dtor,
109 .init = nv31_bus_init, 88 .init = nv31_bus_init,
110 .fini = _nouveau_bus_fini, 89 .fini = _nouveau_bus_fini,
111 }, 90 },
112}; 91 .intr = nv31_bus_intr,
92}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
index f5b2117fa8c6..11918f7e2aca 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
@@ -23,13 +23,27 @@
23 * Ben Skeggs 23 * Ben Skeggs
24 */ 24 */
25 25
26#include <subdev/bus.h> 26#include <subdev/timer.h>
27 27
28struct nv50_bus_priv { 28#include "nv04.h"
29 struct nouveau_bus base;
30};
31 29
32static void 30static int
31nv50_bus_hwsq_exec(struct nouveau_bus *pbus, u32 *data, u32 size)
32{
33 struct nv50_bus_priv *priv = (void *)pbus;
34 int i;
35
36 nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
37 nv_wr32(pbus, 0x001304, 0x00000000);
38 for (i = 0; i < size; i++)
39 nv_wr32(priv, 0x001400 + (i * 4), data[i]);
40 nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
41 nv_wr32(pbus, 0x00130c, 0x00000003);
42
43 return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
44}
45
46void
33nv50_bus_intr(struct nouveau_subdev *subdev) 47nv50_bus_intr(struct nouveau_subdev *subdev)
34{ 48{
35 struct nouveau_bus *pbus = nouveau_bus(subdev); 49 struct nouveau_bus *pbus = nouveau_bus(subdev);
@@ -61,10 +75,10 @@ nv50_bus_intr(struct nouveau_subdev *subdev)
61 } 75 }
62} 76}
63 77
64static int 78int
65nv50_bus_init(struct nouveau_object *object) 79nv50_bus_init(struct nouveau_object *object)
66{ 80{
67 struct nv50_bus_priv *priv = (void *)object; 81 struct nv04_bus_priv *priv = (void *)object;
68 int ret; 82 int ret;
69 83
70 ret = nouveau_bus_init(&priv->base); 84 ret = nouveau_bus_init(&priv->base);
@@ -76,30 +90,16 @@ nv50_bus_init(struct nouveau_object *object)
76 return 0; 90 return 0;
77} 91}
78 92
79static int 93struct nouveau_oclass *
80nv50_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 94nv50_bus_oclass = &(struct nv04_bus_impl) {
81 struct nouveau_oclass *oclass, void *data, u32 size, 95 .base.handle = NV_SUBDEV(BUS, 0x50),
82 struct nouveau_object **pobject) 96 .base.ofuncs = &(struct nouveau_ofuncs) {
83{ 97 .ctor = nv04_bus_ctor,
84 struct nv50_bus_priv *priv;
85 int ret;
86
87 ret = nouveau_bus_create(parent, engine, oclass, &priv);
88 *pobject = nv_object(priv);
89 if (ret)
90 return ret;
91
92 nv_subdev(priv)->intr = nv50_bus_intr;
93 return 0;
94}
95
96struct nouveau_oclass
97nv50_bus_oclass = {
98 .handle = NV_SUBDEV(BUS, 0x50),
99 .ofuncs = &(struct nouveau_ofuncs) {
100 .ctor = nv50_bus_ctor,
101 .dtor = _nouveau_bus_dtor, 98 .dtor = _nouveau_bus_dtor,
102 .init = nv50_bus_init, 99 .init = nv50_bus_init,
103 .fini = _nouveau_bus_fini, 100 .fini = _nouveau_bus_fini,
104 }, 101 },
105}; 102 .intr = nv50_bus_intr,
103 .hwsq_exec = nv50_bus_hwsq_exec,
104 .hwsq_size = 64,
105}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c
new file mode 100644
index 000000000000..d3659055fa4b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c
@@ -0,0 +1,59 @@
1/*
2 * Copyright 2012 Nouveau Community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres <martin.peres@labri.fr>
23 * Ben Skeggs
24 */
25
26#include <subdev/timer.h>
27
28#include "nv04.h"
29
30static int
31nv94_bus_hwsq_exec(struct nouveau_bus *pbus, u32 *data, u32 size)
32{
33 struct nv50_bus_priv *priv = (void *)pbus;
34 int i;
35
36 nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
37 nv_wr32(pbus, 0x001304, 0x00000000);
38 nv_wr32(pbus, 0x001318, 0x00000000);
39 for (i = 0; i < size; i++)
40 nv_wr32(priv, 0x080000 + (i * 4), data[i]);
41 nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
42 nv_wr32(pbus, 0x00130c, 0x00000001);
43
44 return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
45}
46
/* nv94 PBUS implementation: reuses the common nv04 constructor and the
 * nv50 init/intr handlers, but supplies its own HWSQ upload routine
 * with the larger 128-word ucode buffer. */
struct nouveau_oclass *
nv94_bus_oclass = &(struct nv04_bus_impl) {
	.base.handle = NV_SUBDEV(BUS, 0x94),
	.base.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_bus_ctor,
		.dtor = _nouveau_bus_dtor,
		.init = nv50_bus_init,
		.fini = _nouveau_bus_fini,
	},
	.intr = nv50_bus_intr,
	.hwsq_exec = nv94_bus_hwsq_exec,
	.hwsq_size = 128,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
index b192d6246363..73839d7151a7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
@@ -23,11 +23,7 @@
23 * Ben Skeggs 23 * Ben Skeggs
24 */ 24 */
25 25
26#include <subdev/bus.h> 26#include "nv04.h"
27
28struct nvc0_bus_priv {
29 struct nouveau_bus base;
30};
31 27
32static void 28static void
33nvc0_bus_intr(struct nouveau_subdev *subdev) 29nvc0_bus_intr(struct nouveau_subdev *subdev)
@@ -60,7 +56,7 @@ nvc0_bus_intr(struct nouveau_subdev *subdev)
60static int 56static int
61nvc0_bus_init(struct nouveau_object *object) 57nvc0_bus_init(struct nouveau_object *object)
62{ 58{
63 struct nvc0_bus_priv *priv = (void *)object; 59 struct nv04_bus_priv *priv = (void *)object;
64 int ret; 60 int ret;
65 61
66 ret = nouveau_bus_init(&priv->base); 62 ret = nouveau_bus_init(&priv->base);
@@ -72,30 +68,14 @@ nvc0_bus_init(struct nouveau_object *object)
72 return 0; 68 return 0;
73} 69}
74 70
75static int 71struct nouveau_oclass *
76nvc0_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 72nvc0_bus_oclass = &(struct nv04_bus_impl) {
77 struct nouveau_oclass *oclass, void *data, u32 size, 73 .base.handle = NV_SUBDEV(BUS, 0xc0),
78 struct nouveau_object **pobject) 74 .base.ofuncs = &(struct nouveau_ofuncs) {
79{ 75 .ctor = nv04_bus_ctor,
80 struct nvc0_bus_priv *priv;
81 int ret;
82
83 ret = nouveau_bus_create(parent, engine, oclass, &priv);
84 *pobject = nv_object(priv);
85 if (ret)
86 return ret;
87
88 nv_subdev(priv)->intr = nvc0_bus_intr;
89 return 0;
90}
91
92struct nouveau_oclass
93nvc0_bus_oclass = {
94 .handle = NV_SUBDEV(BUS, 0xc0),
95 .ofuncs = &(struct nouveau_ofuncs) {
96 .ctor = nvc0_bus_ctor,
97 .dtor = _nouveau_bus_dtor, 76 .dtor = _nouveau_bus_dtor,
98 .init = nvc0_bus_init, 77 .init = nvc0_bus_init,
99 .fini = _nouveau_bus_fini, 78 .fini = _nouveau_bus_fini,
100 }, 79 },
101}; 80 .intr = nvc0_bus_intr,
81}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
new file mode 100644
index 000000000000..e2938a21b06f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
@@ -0,0 +1,494 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/option.h>
26
27#include <subdev/clock.h>
28#include <subdev/therm.h>
29#include <subdev/volt.h>
30#include <subdev/fb.h>
31
32#include <subdev/bios.h>
33#include <subdev/bios/boost.h>
34#include <subdev/bios/cstep.h>
35#include <subdev/bios/perf.h>
36
37/******************************************************************************
38 * misc
39 *****************************************************************************/
/* Apply the BIOS "boost" table limits for one clock domain.
 *
 * Looks up the boost entry matching @pstate and, within it, the sub-entry
 * for @domain.  @input is clamped to the entry's min/max range and, when
 * @adjust is set, additionally scaled by the sub-entry's percentage.  If
 * no table data exists, @input is returned unchanged.
 */
static u32
nouveau_clock_adjust(struct nouveau_clock *clk, bool adjust,
		     u8 pstate, u8 domain, u32 input)
{
	struct nouveau_bios *bios = nouveau_bios(clk);
	struct nvbios_boostE boostE;
	u8 ver, hdr, cnt, len;
	u16 data;

	data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
	if (data) {
		struct nvbios_boostS boostS;
		u8 idx = 0, sver, shdr;
		u16 subd;

		/* clamp to the per-pstate limits first */
		input = max(boostE.min, input);
		input = min(boostE.max, input);
		do {
			/* fresh copies each pass — presumably because
			 * nvbios_boostSp() modifies them; confirm against
			 * the parser */
			sver = ver;
			shdr = hdr;
			subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
					      cnt, len, &boostS);
			if (subd && boostS.domain == domain) {
				if (adjust)
					input = input * boostS.percent / 100;
				input = max(boostS.min, input);
				input = min(boostS.max, input);
				break;
			}
		} while (subd);
	}

	return input;
}
74
75/******************************************************************************
76 * C-States
77 *****************************************************************************/
78static int
79nouveau_cstate_prog(struct nouveau_clock *clk,
80 struct nouveau_pstate *pstate, int cstatei)
81{
82 struct nouveau_therm *ptherm = nouveau_therm(clk);
83 struct nouveau_volt *volt = nouveau_volt(clk);
84 struct nouveau_cstate *cstate;
85 int ret;
86
87 if (!list_empty(&pstate->list)) {
88 cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
89 } else {
90 cstate = &pstate->base;
91 }
92
93 ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1);
94 if (ret && ret != -ENODEV) {
95 nv_error(clk, "failed to raise fan speed: %d\n", ret);
96 return ret;
97 }
98
99 ret = volt->set_id(volt, cstate->voltage, +1);
100 if (ret && ret != -ENODEV) {
101 nv_error(clk, "failed to raise voltage: %d\n", ret);
102 return ret;
103 }
104
105 ret = clk->calc(clk, cstate);
106 if (ret == 0) {
107 ret = clk->prog(clk);
108 clk->tidy(clk);
109 }
110
111 ret = volt->set_id(volt, cstate->voltage, -1);
112 if (ret && ret != -ENODEV)
113 nv_error(clk, "failed to lower voltage: %d\n", ret);
114
115 ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1);
116 if (ret && ret != -ENODEV)
117 nv_error(clk, "failed to lower fan speed: %d\n", ret);
118
119 return 0;
120}
121
/* Unlink a C-state from its parent P-state's list and free it. */
static void
nouveau_cstate_del(struct nouveau_cstate *cstate)
{
	list_del(&cstate->head);
	kfree(cstate);
}
128
/* Construct a new C-state for @pstate from entry @idx of the BIOS cstep
 * table.
 *
 * The new state starts as a copy of the P-state's base clocks; every
 * domain flagged NVKM_CLK_DOM_FLAG_CORE is then overridden with the cstep
 * frequency, scaled/clamped through the boost table.
 *
 * Returns -ENOENT when the table entry doesn't exist, -ENOMEM on
 * allocation failure, 0 on success.
 */
static int
nouveau_cstate_new(struct nouveau_clock *clk, int idx,
		   struct nouveau_pstate *pstate)
{
	struct nouveau_bios *bios = nouveau_bios(clk);
	struct nouveau_clocks *domain = clk->domains;
	struct nouveau_cstate *cstate = NULL;
	struct nvbios_cstepX cstepX;
	u8 ver, hdr;
	u16 data;

	data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
	if (!data)
		return -ENOENT;

	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (!cstate)
		return -ENOMEM;

	/* inherit the P-state's base clocks, then override below */
	*cstate = pstate->base;
	cstate->voltage = cstepX.voltage;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			u32 freq = nouveau_clock_adjust(clk, true,
							pstate->pstate,
							domain->bios,
							cstepX.freq);
			cstate->domain[domain->name] = freq;
		}
		domain++;
	}

	list_add(&cstate->head, &pstate->list);
	return 0;
}
165
166/******************************************************************************
167 * P-States
168 *****************************************************************************/
/* Switch the hardware to performance state index @pstatei.
 *
 * Memory clock is reprogrammed first through the fb/ram hooks (when the
 * RAM implementation supports reclocking), then the remaining domains
 * via nouveau_cstate_prog().
 *
 * NOTE(review): assumes 0 <= pstatei < clk->state_nr — the list walk
 * below yields an invalid pointer otherwise.  The only caller,
 * nouveau_pstate_calc(), clamps the index into that range.
 */
static int
nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei)
{
	struct nouveau_fb *pfb = nouveau_fb(clk);
	struct nouveau_pstate *pstate;
	int ret, idx = 0;

	list_for_each_entry(pstate, &clk->states, head) {
		if (idx++ == pstatei)
			break;
	}

	nv_debug(clk, "setting performance state %d\n", pstatei);
	clk->pstate = pstatei;

	if (pfb->ram->calc) {
		ret = pfb->ram->calc(pfb, pstate->base.domain[nv_clk_src_mem]);
		if (ret == 0)
			ret = pfb->ram->prog(pfb);
		pfb->ram->tidy(pfb);
	}

	return nouveau_cstate_prog(clk, pstate, 0);
}
193
194static int
195nouveau_pstate_calc(struct nouveau_clock *clk)
196{
197 int pstate, ret = 0;
198
199 nv_trace(clk, "P %d U %d A %d T %d D %d\n", clk->pstate,
200 clk->ustate, clk->astate, clk->tstate, clk->dstate);
201
202 if (clk->state_nr && clk->ustate != -1) {
203 pstate = (clk->ustate < 0) ? clk->astate : clk->ustate;
204 pstate = min(pstate, clk->state_nr - 1 - clk->tstate);
205 pstate = max(pstate, clk->dstate);
206 } else {
207 pstate = clk->pstate = -1;
208 }
209
210 nv_trace(clk, "-> %d\n", pstate);
211 if (pstate != clk->pstate)
212 ret = nouveau_pstate_prog(clk, pstate);
213 return ret;
214}
215
/* Log a one-line summary of @pstate: its id plus, for up to three named
 * clock domains, the frequency (or min-max range across its C-states). */
static void
nouveau_pstate_info(struct nouveau_clock *clk, struct nouveau_pstate *pstate)
{
	/* start one before the table so the pre-increment loop below
	 * visits every entry */
	struct nouveau_clocks *clock = clk->domains - 1;
	struct nouveau_cstate *cstate;
	char info[3][32] = { "", "", "" };
	char name[4] = "--";
	int i = -1;

	/* 0xff marks the synthetic "boot" state, printed as "--" */
	if (pstate->pstate != 0xff)
		snprintf(name, sizeof(name), "%02x", pstate->pstate);

	while ((++clock)->name != nv_clk_src_max) {
		u32 lo = pstate->base.domain[clock->name];
		u32 hi = lo;
		if (hi == 0)
			continue;

		nv_debug(clk, "%02x: %10d KHz\n", clock->name, lo);
		list_for_each_entry(cstate, &pstate->list, head) {
			u32 freq = cstate->domain[clock->name];
			lo = min(lo, freq);
			hi = max(hi, freq);
			nv_debug(clk, "%10d KHz\n", freq);
		}

		/* only domains with a display name count towards the
		 * three info slots (note: ++i only happens then) */
		if (clock->mname && ++i < ARRAY_SIZE(info)) {
			lo /= clock->mdiv;
			hi /= clock->mdiv;
			if (lo == hi) {
				snprintf(info[i], sizeof(info[i]), "%s %d MHz",
					 clock->mname, lo);
			} else {
				snprintf(info[i], sizeof(info[i]),
					 "%s %d-%d MHz", clock->mname, lo, hi);
			}
		}
	}

	nv_info(clk, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
}
257
258static void
259nouveau_pstate_del(struct nouveau_pstate *pstate)
260{
261 struct nouveau_cstate *cstate, *temp;
262
263 list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
264 nouveau_cstate_del(cstate);
265 }
266
267 list_del(&pstate->head);
268 kfree(pstate);
269}
270
271static int
272nouveau_pstate_new(struct nouveau_clock *clk, int idx)
273{
274 struct nouveau_bios *bios = nouveau_bios(clk);
275 struct nouveau_clocks *domain = clk->domains - 1;
276 struct nouveau_pstate *pstate;
277 struct nouveau_cstate *cstate;
278 struct nvbios_cstepE cstepE;
279 struct nvbios_perfE perfE;
280 u8 ver, hdr, cnt, len;
281 u16 data;
282
283 data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
284 if (!data)
285 return -EINVAL;
286 if (perfE.pstate == 0xff)
287 return 0;
288
289 pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
290 cstate = &pstate->base;
291 if (!pstate)
292 return -ENOMEM;
293
294 INIT_LIST_HEAD(&pstate->list);
295
296 pstate->pstate = perfE.pstate;
297 pstate->fanspeed = perfE.fanspeed;
298 cstate->voltage = perfE.voltage;
299 cstate->domain[nv_clk_src_core] = perfE.core;
300 cstate->domain[nv_clk_src_shader] = perfE.shader;
301 cstate->domain[nv_clk_src_mem] = perfE.memory;
302 cstate->domain[nv_clk_src_vdec] = perfE.vdec;
303 cstate->domain[nv_clk_src_dom6] = perfE.disp;
304
305 while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
306 struct nvbios_perfS perfS;
307 u8 sver = ver, shdr = hdr;
308 u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
309 &sver, &shdr, cnt, len, &perfS);
310 if (perfSe == 0 || sver != 0x40)
311 continue;
312
313 if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
314 perfS.v40.freq = nouveau_clock_adjust(clk, false,
315 pstate->pstate,
316 domain->bios,
317 perfS.v40.freq);
318 }
319
320 cstate->domain[domain->name] = perfS.v40.freq;
321 }
322
323 data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
324 if (data) {
325 int idx = cstepE.index;
326 do {
327 nouveau_cstate_new(clk, idx, pstate);
328 } while(idx--);
329 }
330
331 nouveau_pstate_info(clk, pstate);
332 list_add_tail(&pstate->head, &clk->states);
333 clk->state_nr++;
334 return 0;
335}
336
337/******************************************************************************
338 * Adjustment triggers
339 *****************************************************************************/
/* Translate a user pstate request (@req: a pstate id, or the -1/-2
 * automatic modes) into a state-list index stored in clk->ustate.
 *
 * Deliberately disabled for now ("YKW repellant"): the function returns
 * -ENOSYS before touching anything, so user-triggered reclocking cannot
 * happen until the code below is deemed safe.
 *
 * NOTE(review): the (currently unreachable) lookup dereferences @pstate
 * after the loop; with an empty state list that pointer is invalid —
 * this wants a found-flag before the early return is ever removed.
 */
static int
nouveau_clock_ustate_update(struct nouveau_clock *clk, int req)
{
	struct nouveau_pstate *pstate;
	int i = 0;

	/* YKW repellant */
	return -ENOSYS;

	if (req != -1 && req != -2) {
		list_for_each_entry(pstate, &clk->states, head) {
			if (pstate->pstate == req)
				break;
			i++;
		}

		if (pstate->pstate != req)
			return -EINVAL;
		req = i;
	}

	clk->ustate = req;
	return 0;
}
364
/* Record a user-requested performance state and apply it. */
int
nouveau_clock_ustate(struct nouveau_clock *clk, int req)
{
	int ret;

	ret = nouveau_clock_ustate_update(clk, req);
	if (ret == 0)
		ret = nouveau_pstate_calc(clk);
	return ret;
}
373
374int
375nouveau_clock_astate(struct nouveau_clock *clk, int req, int rel)
376{
377 if (!rel) clk->astate = req;
378 if ( rel) clk->astate += rel;
379 clk->astate = min(clk->astate, clk->state_nr - 1);
380 clk->astate = max(clk->astate, 0);
381 return nouveau_pstate_calc(clk);
382}
383
384int
385nouveau_clock_tstate(struct nouveau_clock *clk, int req, int rel)
386{
387 if (!rel) clk->tstate = req;
388 if ( rel) clk->tstate += rel;
389 clk->tstate = min(clk->tstate, 0);
390 clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
391 return nouveau_pstate_calc(clk);
392}
393
394int
395nouveau_clock_dstate(struct nouveau_clock *clk, int req, int rel)
396{
397 if (!rel) clk->dstate = req;
398 if ( rel) clk->dstate += rel;
399 clk->dstate = min(clk->dstate, clk->state_nr - 1);
400 clk->dstate = max(clk->dstate, 0);
401 return nouveau_pstate_calc(clk);
402}
403
404/******************************************************************************
405 * subdev base class implementation
406 *****************************************************************************/
/* Subdev init: snapshot the current ("boot") clocks into clk->bstate,
 * log them, and perform an initial pstate evaluation.
 *
 * Returns the negative error from clk->read() if any domain's frequency
 * cannot be determined.
 */
int
_nouveau_clock_init(struct nouveau_object *object)
{
	struct nouveau_clock *clk = (void *)object;
	struct nouveau_clocks *clock = clk->domains;
	int ret;

	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
	INIT_LIST_HEAD(&clk->bstate.list);
	clk->bstate.pstate = 0xff;	/* synthetic id, shown as "--" */

	/* read back every domain's current frequency from hardware */
	while (clock->name != nv_clk_src_max) {
		ret = clk->read(clk, clock->name);
		if (ret < 0) {
			nv_error(clk, "%02x freq unknown\n", clock->name);
			return ret;
		}
		clk->bstate.base.domain[clock->name] = ret;
		clock++;
	}

	nouveau_pstate_info(clk, &clk->bstate);

	/* default: adaptive mode at the highest state, no throttling */
	clk->astate = clk->state_nr - 1;
	clk->tstate = 0;
	clk->dstate = 0;
	clk->pstate = -1;
	nouveau_pstate_calc(clk);
	return 0;
}
437
438void
439_nouveau_clock_dtor(struct nouveau_object *object)
440{
441 struct nouveau_clock *clk = (void *)object;
442 struct nouveau_pstate *pstate, *temp;
443
444 list_for_each_entry_safe(pstate, temp, &clk->states, head) {
445 nouveau_pstate_del(pstate);
446 }
447
448 nouveau_subdev_destroy(&clk->base);
449}
450
/* Common constructor for all clock subdev implementations.
 *
 * Parses every BIOS perf-table entry into clk->states (iteration stops at
 * the first failure, which includes running off the end of the table),
 * then honours the "NvClkMode" config option: "disabled" forces manual
 * reclocking off, a number selects an initial user pstate.
 */
int
nouveau_clock_create_(struct nouveau_object *parent,
		      struct nouveau_object *engine,
		      struct nouveau_oclass *oclass,
		      struct nouveau_clocks *clocks,
		      int length, void **object)
{
	struct nouveau_device *device = nv_device(parent);
	struct nouveau_clock *clk;
	int ret, idx, arglen;
	const char *mode;

	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "CLK",
				     "clock", length, object);
	clk = *object;
	if (ret)
		return ret;

	INIT_LIST_HEAD(&clk->states);
	clk->domains = clocks;
	clk->ustate = -1;	/* user reclocking off by default */

	idx = 0;
	do {
		ret = nouveau_pstate_new(clk, idx++);
	} while (ret == 0);

	mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen);
	if (mode) {
		if (!strncasecmpz(mode, "disabled", arglen)) {
			clk->ustate = -1;
		} else {
			char save = mode[arglen];
			long v;

			/* NOTE(review): temporarily NUL-terminates the
			 * option in place, casting away const — relies on
			 * cfgopt being writable and nobody else reading
			 * it concurrently */
			((char *)mode)[arglen] = '\0';
			if (!kstrtol(mode, 0, &v))
				nouveau_clock_ustate_update(clk, v);
			((char *)mode)[arglen] = save;
		}
	}

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index a14277586595..da50c1b12928 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -77,7 +77,7 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
77 struct nv04_clock_priv *priv; 77 struct nv04_clock_priv *priv;
78 int ret; 78 int ret;
79 79
80 ret = nouveau_clock_create(parent, engine, oclass, &priv); 80 ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv);
81 *pobject = nv_object(priv); 81 *pobject = nv_object(priv);
82 if (ret) 82 if (ret)
83 return ret; 83 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
index 0db5dbfd91b5..db7346f79080 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
@@ -23,11 +23,188 @@
23 */ 23 */
24 24
25#include <subdev/clock.h> 25#include <subdev/clock.h>
26#include <subdev/bios.h>
27#include <subdev/bios/pll.h>
28
29#include "pll.h"
26 30
27struct nv40_clock_priv { 31struct nv40_clock_priv {
28 struct nouveau_clock base; 32 struct nouveau_clock base;
33 u32 ctrl;
34 u32 npll_ctrl;
35 u32 npll_coef;
36 u32 spll;
37};
38
/* Clock domains exposed on nv40.  A bios id of 0xff means "no perf-table
 * sub-entry"; the trailing 1000 is presumably the mdiv divisor used by
 * nouveau_pstate_info() to report MHz — confirm against struct
 * nouveau_clocks. */
static struct nouveau_clocks
nv40_domain[] = {
	{ nv_clk_src_crystal, 0xff },
	{ nv_clk_src_href   , 0xff },
	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
	{ nv_clk_src_max }
};
30 48
49static u32
50read_pll_1(struct nv40_clock_priv *priv, u32 reg)
51{
52 u32 ctrl = nv_rd32(priv, reg + 0x00);
53 int P = (ctrl & 0x00070000) >> 16;
54 int N = (ctrl & 0x0000ff00) >> 8;
55 int M = (ctrl & 0x000000ff) >> 0;
56 u32 ref = 27000, clk = 0;
57
58 if (ctrl & 0x80000000)
59 clk = ref * N / M;
60
61 return clk >> P;
62}
63
/* Decode a two-stage PLL: ctrl register holds enable/mode bits and log2P,
 * coef register holds both N/M pairs.  The second stage only contributes
 * when the mode bits select it ((ctrl & 0x40000100) == 0x40000000).
 * Returns 0 when disabled or mis-programmed (M divider of 0). */
static u32
read_pll_2(struct nv40_clock_priv *priv, u32 reg)
{
	u32 ctrl = nv_rd32(priv, reg + 0x00);
	u32 coef = nv_rd32(priv, reg + 0x04);
	int N2 = (coef & 0xff000000) >> 24;
	int M2 = (coef & 0x00ff0000) >> 16;
	int N1 = (coef & 0x0000ff00) >> 8;
	int M1 = (coef & 0x000000ff) >> 0;
	u32 P = (ctrl & 0x00070000) >> 16;
	u32 ref = 27000, clk = 0;

	if ((ctrl & 0x80000000) && M1) {
		clk = ref * N1 / M1;
		if ((ctrl & 0x40000100) == 0x40000000) {
			if (M2)
				clk = clk * N2 / M2;
			else
				clk = 0;
		}
	}

	return clk >> P;
}
88
89static u32
90read_clk(struct nv40_clock_priv *priv, u32 src)
91{
92 switch (src) {
93 case 3:
94 return read_pll_2(priv, 0x004000);
95 case 2:
96 return read_pll_1(priv, 0x004008);
97 default:
98 break;
99 }
100
101 return 0;
102}
103
/* ->read hook: report the current frequency of clock domain @src.
 *
 * core/shader are routed through the 0x00c040 mux; memory always comes
 * from the PLL at 0x4020.  Returns -EINVAL for domains this chipset
 * doesn't expose.
 */
static int
nv40_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
{
	struct nv40_clock_priv *priv = (void *)clk;
	u32 mast = nv_rd32(priv, 0x00c040);

	switch (src) {
	case nv_clk_src_crystal:
		return nv_device(priv)->crystal;
	case nv_clk_src_href:
		return 100000; /*XXX: PCIE/AGP differ*/
	case nv_clk_src_core:
		return read_clk(priv, (mast & 0x00000003) >> 0);
	case nv_clk_src_shader:
		return read_clk(priv, (mast & 0x00000030) >> 4);
	case nv_clk_src_mem:
		return read_pll_2(priv, 0x4020);
	default:
		break;
	}

	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
	return -EINVAL;
}
128
/* Compute PLL coefficients for the PLL at @reg to hit frequency @clk,
 * using the limits from the BIOS PLL table.  The second VCO is only used
 * when the target exceeds the first VCO's range.
 *
 * Returns the (positive) achieved result from nv04_pll_calc(), -ERANGE
 * when no coefficients satisfy the target, or the table-parse error.
 */
static int
nv40_clock_calc_pll(struct nv40_clock_priv *priv, u32 reg, u32 clk,
		    int *N1, int *M1, int *N2, int *M2, int *log2P)
{
	struct nouveau_bios *bios = nouveau_bios(priv);
	struct nvbios_pll pll;
	int ret;

	ret = nvbios_pll_parse(bios, reg, &pll);
	if (ret)
		return ret;

	/* target reachable by VCO1 alone: disable the second stage */
	if (clk < pll.vco1.max_freq)
		pll.vco2.max_freq = 0;

	ret = nv04_pll_calc(nv_subdev(priv), &pll, clk, N1, M1, N2, M2, log2P);
	if (ret == 0)
		return -ERANGE;
	return ret;
}
149
/* ->calc hook: precompute the register values for a reclock to @cstate;
 * nothing is written to hardware until nv40_clock_prog().
 *
 * npll (0x4000) carries the core clock.  When N2 == M2 the second PLL
 * stage is a no-op, so the single-stage encoding is used.  The shader
 * clock shares npll unless it differs from core, in which case spll
 * (0x4008) is programmed and the 0x00c040 mux value (priv->ctrl) routes
 * shader to it.
 */
static int
nv40_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
{
	struct nv40_clock_priv *priv = (void *)clk;
	int gclk = cstate->domain[nv_clk_src_core];
	int sclk = cstate->domain[nv_clk_src_shader];
	int N1, M1, N2, M2, log2P;
	int ret;

	/* core/geometric clock */
	ret = nv40_clock_calc_pll(priv, 0x004000, gclk,
				  &N1, &M1, &N2, &M2, &log2P);
	if (ret < 0)
		return ret;

	if (N2 == M2) {
		priv->npll_ctrl = 0x80000100 | (log2P << 16);
		priv->npll_coef = (N1 << 8) | M1;
	} else {
		priv->npll_ctrl = 0xc0000000 | (log2P << 16);
		priv->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
	}

	/* use the second pll for shader/rop clock, if it differs from core */
	if (sclk && sclk != gclk) {
		ret = nv40_clock_calc_pll(priv, 0x004008, sclk,
					  &N1, &M1, NULL, NULL, &log2P);
		if (ret < 0)
			return ret;

		priv->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
		priv->ctrl = 0x00000223;
	} else {
		priv->spll = 0x00000000;
		priv->ctrl = 0x00000333;
	}

	return 0;
}
189
/* ->prog hook: commit the values computed by nv40_clock_calc().
 *
 * Order matters: both clocks are first switched away from their PLLs via
 * the 0x00c040 mux, the PLLs are reprogrammed and given 5ms to lock, and
 * only then is the mux restored to the final routing. */
static int
nv40_clock_prog(struct nouveau_clock *clk)
{
	struct nv40_clock_priv *priv = (void *)clk;
	nv_mask(priv, 0x00c040, 0x00000333, 0x00000000);
	nv_wr32(priv, 0x004004, priv->npll_coef);
	nv_mask(priv, 0x004000, 0xc0070100, priv->npll_ctrl);
	nv_mask(priv, 0x004008, 0xc007ffff, priv->spll);
	mdelay(5);
	nv_mask(priv, 0x00c040, 0x00000333, priv->ctrl);
	return 0;
}
202
/* ->tidy hook: nothing to clean up after a reclock on nv40; the hook
 * exists because nouveau_cstate_prog() calls ->tidy() unconditionally. */
static void
nv40_clock_tidy(struct nouveau_clock *clk)
{
}
207
31static int 208static int
32nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 209nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
33 struct nouveau_oclass *oclass, void *data, u32 size, 210 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -36,13 +213,17 @@ nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
36 struct nv40_clock_priv *priv; 213 struct nv40_clock_priv *priv;
37 int ret; 214 int ret;
38 215
39 ret = nouveau_clock_create(parent, engine, oclass, &priv); 216 ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, &priv);
40 *pobject = nv_object(priv); 217 *pobject = nv_object(priv);
41 if (ret) 218 if (ret)
42 return ret; 219 return ret;
43 220
44 priv->base.pll_calc = nv04_clock_pll_calc; 221 priv->base.pll_calc = nv04_clock_pll_calc;
45 priv->base.pll_prog = nv04_clock_pll_prog; 222 priv->base.pll_prog = nv04_clock_pll_prog;
223 priv->base.read = nv40_clock_read;
224 priv->base.calc = nv40_clock_calc;
225 priv->base.prog = nv40_clock_prog;
226 priv->base.tidy = nv40_clock_tidy;
46 return 0; 227 return 0;
47} 228}
48 229
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
index d09d3e78040c..250a6d96016b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -22,40 +22,538 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/clock.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bios/pll.h> 26#include <subdev/bios/pll.h>
28 27
28#include "nv50.h"
29#include "pll.h" 29#include "pll.h"
30#include "seq.h"
30 31
31struct nv50_clock_priv { 32static u32
32 struct nouveau_clock base; 33read_div(struct nv50_clock_priv *priv)
33}; 34{
35 switch (nv_device(priv)->chipset) {
36 case 0x50: /* it exists, but only has bit 31, not the dividers.. */
37 case 0x84:
38 case 0x86:
39 case 0x98:
40 case 0xa0:
41 return nv_rd32(priv, 0x004700);
42 case 0x92:
43 case 0x94:
44 case 0x96:
45 return nv_rd32(priv, 0x004800);
46 default:
47 return 0x00000000;
48 }
49}
50
/* Work out the frequency of the reference PLL feeding the PLL at @base.
 *
 * The reference-PLL layout is chipset-specific: nv50/nva0 select one of
 * two refpll blocks via 0x00e18c, nv84/86/92 have a single block, and
 * nv94/96/98 route through 0x00c050 (which may also select crystal/href
 * directly).
 *
 * NOTE(review): in the BUG_ON(1) default branch, should execution ever
 * continue, coef/P/N/M are used uninitialized below — relies on BUG_ON
 * never returning.
 */
static u32
read_pll_src(struct nv50_clock_priv *priv, u32 base)
{
	struct nouveau_clock *clk = &priv->base;
	u32 coef, ref = clk->read(clk, nv_clk_src_crystal);
	u32 rsel = nv_rd32(priv, 0x00e18c);
	int P, N, M, id;

	switch (nv_device(priv)->chipset) {
	case 0x50:
	case 0xa0:
		switch (base) {
		case 0x4020:
		case 0x4028: id = !!(rsel & 0x00000004); break;
		case 0x4008: id = !!(rsel & 0x00000008); break;
		case 0x4030: id = 0; break;
		default:
			nv_error(priv, "ref: bad pll 0x%06x\n", base);
			return 0;
		}

		coef = nv_rd32(priv, 0x00e81c + (id * 0x0c));
		ref *= (coef & 0x01000000) ? 2 : 4;
		P = (coef & 0x00070000) >> 16;
		N = ((coef & 0x0000ff00) >> 8) + 1;
		M = ((coef & 0x000000ff) >> 0) + 1;
		break;
	case 0x84:
	case 0x86:
	case 0x92:
		coef = nv_rd32(priv, 0x00e81c);
		P = (coef & 0x00070000) >> 16;
		N = (coef & 0x0000ff00) >> 8;
		M = (coef & 0x000000ff) >> 0;
		break;
	case 0x94:
	case 0x96:
	case 0x98:
		rsel = nv_rd32(priv, 0x00c050);
		switch (base) {
		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
		case 0x4030: rsel = 3; break;
		default:
			nv_error(priv, "ref: bad pll 0x%06x\n", base);
			return 0;
		}

		switch (rsel) {
		case 0: id = 1; break;
		case 1: return clk->read(clk, nv_clk_src_crystal);
		case 2: return clk->read(clk, nv_clk_src_href);
		case 3: id = 0; break;
		}

		coef = nv_rd32(priv, 0x00e81c + (id * 0x28));
		P = (nv_rd32(priv, 0x00e824 + (id * 0x28)) >> 16) & 7;
		P += (coef & 0x00070000) >> 16;
		N = (coef & 0x0000ff00) >> 8;
		M = (coef & 0x000000ff) >> 0;
		break;
	default:
		BUG_ON(1);
	}

	if (M)
		return (ref * N / M) >> P;
	return 0;
}
121
122static u32
123read_pll_ref(struct nv50_clock_priv *priv, u32 base)
124{
125 struct nouveau_clock *clk = &priv->base;
126 u32 src, mast = nv_rd32(priv, 0x00c040);
127
128 switch (base) {
129 case 0x004028:
130 src = !!(mast & 0x00200000);
131 break;
132 case 0x004020:
133 src = !!(mast & 0x00400000);
134 break;
135 case 0x004008:
136 src = !!(mast & 0x00010000);
137 break;
138 case 0x004030:
139 src = !!(mast & 0x02000000);
140 break;
141 case 0x00e810:
142 return clk->read(clk, nv_clk_src_crystal);
143 default:
144 nv_error(priv, "bad pll 0x%06x\n", base);
145 return 0;
146 }
147
148 if (src)
149 return clk->read(clk, nv_clk_src_href);
150 return read_pll_src(priv, base);
151}
152
/* Decode the two-stage PLL at @base against its reference (from
 * read_pll_ref()); returns 0 when disabled or mis-programmed.  The second
 * stage only contributes when (ctrl & 0x40000100) == 0x40000000. */
static u32
read_pll(struct nv50_clock_priv *priv, u32 base)
{
	struct nouveau_clock *clk = &priv->base;
	u32 mast = nv_rd32(priv, 0x00c040);
	u32 ctrl = nv_rd32(priv, base + 0);
	u32 coef = nv_rd32(priv, base + 4);
	u32 ref = read_pll_ref(priv, base);
	u32 freq = 0;
	int N1, N2, M1, M2;

	if (base == 0x004028 && (mast & 0x00100000)) {
		/* wtf, appears to only disable post-divider on nva0 */
		if (nv_device(priv)->chipset != 0xa0)
			return clk->read(clk, nv_clk_src_dom6);
	}

	N2 = (coef & 0xff000000) >> 24;
	M2 = (coef & 0x00ff0000) >> 16;
	N1 = (coef & 0x0000ff00) >> 8;
	M1 = (coef & 0x000000ff);
	if ((ctrl & 0x80000000) && M1) {
		freq = ref * N1 / M1;
		if ((ctrl & 0x40000100) == 0x40000000) {
			if (M2)
				freq = freq * N2 / M2;
			else
				freq = 0;
		}
	}

	return freq;
}
34 186
/* ->read hook: report the current frequency of domain @src by decoding
 * the 0x00c040 clock mux (and per-chipset divider registers).
 *
 * Most domains shift their source by a post-divider P taken from the
 * relevant PLL/divider register.  Returns -EINVAL for mux states or
 * domains that aren't understood.
 */
static int
nv50_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
{
	struct nv50_clock_priv *priv = (void *)clk;
	u32 mast = nv_rd32(priv, 0x00c040);
	u32 P = 0;

	switch (src) {
	case nv_clk_src_crystal:
		return nv_device(priv)->crystal;
	case nv_clk_src_href:
		return 100000; /* PCIE reference clock */
	case nv_clk_src_hclk:
		/* href * 2.7778 */
		return div_u64((u64)clk->read(clk, nv_clk_src_href) * 27778, 10000);
	case nv_clk_src_hclkm3:
		return clk->read(clk, nv_clk_src_hclk) * 3;
	case nv_clk_src_hclkm3d2:
		return clk->read(clk, nv_clk_src_hclk) * 3 / 2;
	case nv_clk_src_host:
		switch (mast & 0x30000000) {
		case 0x00000000: return clk->read(clk, nv_clk_src_href);
		case 0x10000000: break;
		case 0x20000000: /* !0x50 */
		case 0x30000000: return clk->read(clk, nv_clk_src_hclk);
		}
		break;
	case nv_clk_src_core:
		if (!(mast & 0x00100000))
			P = (nv_rd32(priv, 0x004028) & 0x00070000) >> 16;
		switch (mast & 0x00000003) {
		case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
		case 0x00000001: return clk->read(clk, nv_clk_src_dom6);
		case 0x00000002: return read_pll(priv, 0x004020) >> P;
		case 0x00000003: return read_pll(priv, 0x004028) >> P;
		}
		break;
	case nv_clk_src_shader:
		P = (nv_rd32(priv, 0x004020) & 0x00070000) >> 16;
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000080)
				return clk->read(clk, nv_clk_src_host) >> P;
			return clk->read(clk, nv_clk_src_crystal) >> P;
		case 0x00000010: break;
		case 0x00000020: return read_pll(priv, 0x004028) >> P;
		case 0x00000030: return read_pll(priv, 0x004020) >> P;
		}
		break;
	case nv_clk_src_mem:
		P = (nv_rd32(priv, 0x004008) & 0x00070000) >> 16;
		/* bit 9 set: mpll bypassed, source selected by the mux */
		if (nv_rd32(priv, 0x004008) & 0x00000200) {
			switch (mast & 0x0000c000) {
			case 0x00000000:
				return clk->read(clk, nv_clk_src_crystal) >> P;
			case 0x00008000:
			case 0x0000c000:
				return clk->read(clk, nv_clk_src_href) >> P;
			}
		} else {
			return read_pll(priv, 0x004008) >> P;
		}
		break;
	case nv_clk_src_vdec:
		P = (read_div(priv) & 0x00000700) >> 8;
		switch (nv_device(priv)->chipset) {
		case 0x84:
		case 0x86:
		case 0x92:
		case 0x94:
		case 0x96:
		case 0xa0:
			switch (mast & 0x00000c00) {
			case 0x00000000:
				if (nv_device(priv)->chipset == 0xa0) /* wtf?? */
					return clk->read(clk, nv_clk_src_core) >> P;
				return clk->read(clk, nv_clk_src_crystal) >> P;
			case 0x00000400:
				return 0;
			case 0x00000800:
				if (mast & 0x01000000)
					return read_pll(priv, 0x004028) >> P;
				return read_pll(priv, 0x004030) >> P;
			case 0x00000c00:
				return clk->read(clk, nv_clk_src_core) >> P;
			}
			break;
		case 0x98:
			switch (mast & 0x00000c00) {
			case 0x00000000:
				return clk->read(clk, nv_clk_src_core) >> P;
			case 0x00000400:
				return 0;
			case 0x00000800:
				return clk->read(clk, nv_clk_src_hclkm3d2) >> P;
			case 0x00000c00:
				return clk->read(clk, nv_clk_src_mem) >> P;
			}
			break;
		}
		break;
	case nv_clk_src_dom6:
		switch (nv_device(priv)->chipset) {
		case 0x50:
		case 0xa0:
			return read_pll(priv, 0x00e810) >> 2;
		case 0x84:
		case 0x86:
		case 0x92:
		case 0x94:
		case 0x96:
		case 0x98:
			P = (read_div(priv) & 0x00000007) >> 0;
			switch (mast & 0x0c000000) {
			case 0x00000000: return clk->read(clk, nv_clk_src_href);
			case 0x04000000: break;
			case 0x08000000: return clk->read(clk, nv_clk_src_hclk);
			case 0x0c000000:
				return clk->read(clk, nv_clk_src_hclkm3) >> P;
			}
			break;
		default:
			break;
		}
		/* fall through to the error path below */
	default:
		break;
	}

	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
	return -EINVAL;
}
317
/* Compute single-stage coefficients for the PLL at @reg to hit @clk,
 * using BIOS limits and the live reference frequency.  VCO2 is forced off.
 * Returns the achieved frequency from nv04_pll_calc(), or 0 on any
 * failure (table parse error or unknown reference). */
static u32
calc_pll(struct nv50_clock_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P)
{
	struct nouveau_bios *bios = nouveau_bios(priv);
	struct nvbios_pll pll;
	int ret;

	ret = nvbios_pll_parse(bios, reg, &pll);
	if (ret)
		return 0;

	pll.vco2.max_freq = 0;
	pll.refclk = read_pll_ref(priv, reg);
	if (!pll.refclk)
		return 0;

	return nv04_pll_calc(nv_subdev(priv), &pll, clk, N, M, NULL, NULL, P);
}
336
/* Find the power-of-two divider (0..7) of @src that lands closest to
 * @target; stores the divider in *div and returns the resulting clock.
 * clk0 is the nearest candidate at or below target, clk1 the one a step
 * above it.
 *
 * NOTE(review): if the loop exhausts all 8 shifts without clk0 dropping
 * to target, clk1 is still the unshifted src and *div ends up at 8 (or 7
 * after the decrement) — callers appear to rely on targets within range.
 */
static inline u32
calc_div(u32 src, u32 target, int *div)
{
	u32 clk0 = src, clk1 = src;
	for (*div = 0; *div <= 7; (*div)++) {
		if (clk0 <= target) {
			clk1 = clk0 << (*div ? 1 : 0);
			break;
		}
		clk0 >>= 1;
	}

	/* pick whichever side of target is closer */
	if (target - clk0 <= clk1 - target)
		return clk0;
	(*div)--;
	return clk1;
}
354
355static inline u32
356clk_same(u32 a, u32 b)
357{
358 return ((a / 1000) == (b / 1000));
359}
360
 /* Build (but do not yet execute) a hwsq script reclocking the core,
  * shader, vdec and dom6 domains to the frequencies in 'cstate'.  The
  * script is run later by nv50_clock_prog().  Returns 0 on success, or
  * -ERANGE when a PLL cannot produce a requested frequency. */
 361static int
 362nv50_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
 363{
 364 struct nv50_clock_priv *priv = (void *)clk;
 365 struct nv50_clock_hwsq *hwsq = &priv->hwsq;
 366 const int shader = cstate->domain[nv_clk_src_shader];
 367 const int core = cstate->domain[nv_clk_src_core];
 368 const int vdec = cstate->domain[nv_clk_src_vdec];
 369 const int dom6 = cstate->domain[nv_clk_src_dom6];
 370 u32 mastm = 0, mastv = 0;
 371 u32 divsm = 0, divsv = 0;
 372 int N, M, P1, P2;
 373 int freq, out;
 374
 375 /* prepare a hwsq script from which we'll perform the reclock */
 376 out = clk_init(hwsq, nv_subdev(clk));
 377 if (out)
 378 return out;
 379
 380 clk_wr32(hwsq, fifo, 0x00000001); /* block fifo */
 381 clk_nsec(hwsq, 8000);
 382 clk_setf(hwsq, 0x10, 0x00); /* disable fb */
 383 clk_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
 384
 385 /* vdec: avoid modifying xpll until we know exactly how the other
 386 * clock domains work, i suspect at least some of them can also be
 387 * tied to xpll...
 388 */
 389 if (vdec) {
 390 /* see how close we can get using nvclk as a source */
 391 freq = calc_div(core, vdec, &P1);
 392
 393 /* see how close we can get using xpll/hclk as a source */
 394 if (nv_device(priv)->chipset != 0x98)
 395 out = read_pll(priv, 0x004030);
 396 else
 397 out = clk->read(clk, nv_clk_src_hclkm3d2);
 398 out = calc_div(out, vdec, &P2);
 399
 400 /* select whichever gets us closest */
 401 if (abs(vdec - freq) <= abs(vdec - out)) {
 402 if (nv_device(priv)->chipset != 0x98)
 403 mastv |= 0x00000c00;
 404 divsv |= P1 << 8;
 405 } else {
 406 mastv |= 0x00000800;
 407 divsv |= P2 << 8;
 408 }
 409
 410 mastm |= 0x00000c00;
 411 divsm |= 0x00000700;
 412 }
 413
 414 /* dom6: nfi what this is, but we're limited to various combinations
 415 * of the host clock frequency
 416 */
 417 if (dom6) {
 418 if (clk_same(dom6, clk->read(clk, nv_clk_src_href))) {
 419 mastv |= 0x00000000;
 420 } else
 421 if (clk_same(dom6, clk->read(clk, nv_clk_src_hclk))) {
 422 mastv |= 0x08000000;
 423 } else {
 424 freq = clk->read(clk, nv_clk_src_hclk) * 3;
 425 freq = calc_div(freq, dom6, &P1);
 426
 427 mastv |= 0x0c000000;
 428 divsv |= P1;
 429 }
 430
 431 mastm |= 0x0c000000;
 432 divsm |= 0x00000007;
 433 }
 434
 435 /* vdec/dom6: switch to "safe" clocks temporarily, update dividers
 436 * and then switch to target clocks
 437 */
 438 clk_mask(hwsq, mast, mastm, 0x00000000);
 439 clk_mask(hwsq, divs, divsm, divsv);
 440 clk_mask(hwsq, mast, mastm, mastv);
 441
 442 /* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6,
 443 * sclk to hclk) before reprogramming
 444 */
 445 if (nv_device(priv)->chipset < 0x92)
 446 clk_mask(hwsq, mast, 0x001000b0, 0x00100080);
 447 else
 448 clk_mask(hwsq, mast, 0x000000b3, 0x00000081);
 449
 450 /* core: for the moment at least, always use nvpll */
 451 freq = calc_pll(priv, 0x4028, core, &N, &M, &P1);
 452 if (freq == 0)
 453 return -ERANGE;
 454
 455 clk_mask(hwsq, nvpll[0], 0xc03f0100,
 456 0x80000000 | (P1 << 19) | (P1 << 16));
 457 clk_mask(hwsq, nvpll[1], 0x0000ffff, (N << 8) | M);
 458
 459 /* shader: tie to nvclk if possible, otherwise use spll. have to be
 460 * very careful that the shader clock is at least twice the core, or
 461 * some chipsets will be very unhappy. i expect most or all of these
 462 * cases will be handled by tying to nvclk, but it's possible there's
 463 * corners
 464 */
 /* NOTE(review): P1 here is still the core PLL post-divider computed
  * above; if it was non-zero it is decremented and reused for spll when
  * the shader target is exactly twice the core clock */
 465 if (P1-- && shader == (core << 1)) {
 466 clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16));
 467 clk_mask(hwsq, mast, 0x00100033, 0x00000023);
 468 } else {
 469 freq = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
 470 if (freq == 0)
 471 return -ERANGE;
 472
 473 clk_mask(hwsq, spll[0], 0xc03f0100,
 474 0x80000000 | (P1 << 19) | (P1 << 16));
 475 clk_mask(hwsq, spll[1], 0x0000ffff, (N << 8) | M);
 476 clk_mask(hwsq, mast, 0x00100033, 0x00000033);
 477 }
 478
 479 /* restore normal operation */
 480 clk_setf(hwsq, 0x10, 0x01); /* enable fb */
 481 clk_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
 482 clk_wr32(hwsq, fifo, 0x00000000); /* un-block fifo */
 483 return 0;
 484}
485
 /* Execute the hwsq script prepared by nv50_clock_calc() (exec = true). */
 486static int
 487nv50_clock_prog(struct nouveau_clock *clk)
 488{
 489 struct nv50_clock_priv *priv = (void *)clk;
 490 return clk_exec(&priv->hwsq, true);
 491}
492
 /* Discard a pending hwsq script without executing it (exec = false). */
 493static void
 494nv50_clock_tidy(struct nouveau_clock *clk)
 495{
 496 struct nv50_clock_priv *priv = (void *)clk;
 497 clk_exec(&priv->hwsq, false);
 498}
499
500int
36nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 501nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
37 struct nouveau_oclass *oclass, void *data, u32 size, 502 struct nouveau_oclass *oclass, void *data, u32 size,
38 struct nouveau_object **pobject) 503 struct nouveau_object **pobject)
39{ 504{
505 struct nv50_clock_oclass *pclass = (void *)oclass;
40 struct nv50_clock_priv *priv; 506 struct nv50_clock_priv *priv;
41 int ret; 507 int ret;
42 508
43 ret = nouveau_clock_create(parent, engine, oclass, &priv); 509 ret = nouveau_clock_create(parent, engine, oclass, pclass->domains,
510 &priv);
44 *pobject = nv_object(priv); 511 *pobject = nv_object(priv);
45 if (ret) 512 if (ret)
46 return ret; 513 return ret;
47 514
48 priv->base.pll_calc = nv04_clock_pll_calc; 515 priv->hwsq.r_fifo = hwsq_reg(0x002504);
516 priv->hwsq.r_spll[0] = hwsq_reg(0x004020);
517 priv->hwsq.r_spll[1] = hwsq_reg(0x004024);
518 priv->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
519 priv->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
520 switch (nv_device(priv)->chipset) {
521 case 0x92:
522 case 0x94:
523 case 0x96:
524 priv->hwsq.r_divs = hwsq_reg(0x004800);
525 break;
526 default:
527 priv->hwsq.r_divs = hwsq_reg(0x004700);
528 break;
529 }
530 priv->hwsq.r_mast = hwsq_reg(0x00c040);
531
532 priv->base.read = nv50_clock_read;
533 priv->base.calc = nv50_clock_calc;
534 priv->base.prog = nv50_clock_prog;
535 priv->base.tidy = nv50_clock_tidy;
49 return 0; 536 return 0;
50} 537}
51 538
52struct nouveau_oclass 539static struct nouveau_clocks
53nv50_clock_oclass = { 540nv50_domains[] = {
54 .handle = NV_SUBDEV(CLOCK, 0x50), 541 { nv_clk_src_crystal, 0xff },
55 .ofuncs = &(struct nouveau_ofuncs) { 542 { nv_clk_src_href , 0xff },
543 { nv_clk_src_core , 0xff, 0, "core", 1000 },
544 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
545 { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
546 { nv_clk_src_max }
547};
548
549struct nouveau_oclass *
550nv50_clock_oclass = &(struct nv50_clock_oclass) {
551 .base.handle = NV_SUBDEV(CLOCK, 0x50),
552 .base.ofuncs = &(struct nouveau_ofuncs) {
56 .ctor = nv50_clock_ctor, 553 .ctor = nv50_clock_ctor,
57 .dtor = _nouveau_clock_dtor, 554 .dtor = _nouveau_clock_dtor,
58 .init = _nouveau_clock_init, 555 .init = _nouveau_clock_init,
59 .fini = _nouveau_clock_fini, 556 .fini = _nouveau_clock_fini,
60 }, 557 },
61}; 558 .domains = nv50_domains,
559}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h
new file mode 100644
index 000000000000..f10917d789e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h
@@ -0,0 +1,31 @@
1#ifndef __NVKM_CLK_NV50_H__
2#define __NVKM_CLK_NV50_H__
3
4#include <subdev/bus.h>
5#include <subdev/bus/hwsq.h>
6#include <subdev/clock.h>
7
8struct nv50_clock_hwsq {
9 struct hwsq base;
10 struct hwsq_reg r_fifo;
11 struct hwsq_reg r_spll[2];
12 struct hwsq_reg r_nvpll[2];
13 struct hwsq_reg r_divs;
14 struct hwsq_reg r_mast;
15};
16
17struct nv50_clock_priv {
18 struct nouveau_clock base;
19 struct nv50_clock_hwsq hwsq;
20};
21
22int nv50_clock_ctor(struct nouveau_object *, struct nouveau_object *,
23 struct nouveau_oclass *, void *, u32,
24 struct nouveau_object **);
25
26struct nv50_clock_oclass {
27 struct nouveau_oclass base;
28 struct nouveau_clocks *domains;
29};
30
31#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c
new file mode 100644
index 000000000000..b0b7c1437f10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "nv50.h"
26
27static struct nouveau_clocks
28nv84_domains[] = {
29 { nv_clk_src_crystal, 0xff },
30 { nv_clk_src_href , 0xff },
31 { nv_clk_src_core , 0xff, 0, "core", 1000 },
32 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
33 { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
34 { nv_clk_src_vdec , 0xff },
35 { nv_clk_src_max }
36};
37
38struct nouveau_oclass *
39nv84_clock_oclass = &(struct nv50_clock_oclass) {
40 .base.handle = NV_SUBDEV(CLOCK, 0x84),
41 .base.ofuncs = &(struct nouveau_ofuncs) {
42 .ctor = nv50_clock_ctor,
43 .dtor = _nouveau_clock_dtor,
44 .init = _nouveau_clock_init,
45 .fini = _nouveau_clock_fini,
46 },
47 .domains = nv84_domains,
48}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index f074cd20bc9c..4f5a1373f002 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -22,33 +22,277 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/clock.h>
26#include <subdev/bios.h> 25#include <subdev/bios.h>
27#include <subdev/bios/pll.h> 26#include <subdev/bios/pll.h>
27#include <subdev/timer.h>
28 28
29#include "pll.h" 29#include "pll.h"
30 30
31#include "nva3.h"
32
31struct nva3_clock_priv { 33struct nva3_clock_priv {
32 struct nouveau_clock base; 34 struct nouveau_clock base;
35 struct nva3_clock_info eng[nv_clk_src_max];
33}; 36};
34 37
38static u32 read_clk(struct nva3_clock_priv *, int, bool);
39static u32 read_pll(struct nva3_clock_priv *, int, u32);
40
 /* Frequency of the VCO feeding source-clock slot 'clk': one of the two
  * 0xe8xx PLLs, selected by bits 4:5 (0x30) of the slot's 0x4120 control
  * register. */
 41static u32
 42read_vco(struct nva3_clock_priv *priv, int clk)
 43{
 44 u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
 45 if ((sctl & 0x00000030) != 0x00000030)
 46 return read_pll(priv, 0x41, 0x00e820);
 47 return read_pll(priv, 0x42, 0x00e8a0);
 48}
49
 /* Frequency (kHz) of source-clock slot 'clk'.  Slots >= 0x40 are the
  * fixed refclks for the 0xe8xx PLLs (read from 0x471c on chipset 0xaf,
  * crystal otherwise).  For normal slots, the 0x4120 control register
  * selects crystal, 100/108MHz, or a divided VCO output; 'ignore_en'
  * reads the mux even when the enable bit (0x100) is clear. */
 50static u32
 51read_clk(struct nva3_clock_priv *priv, int clk, bool ignore_en)
 52{
 53 u32 sctl, sdiv, sclk;
 54
 55 /* refclk for the 0xe8xx plls is a fixed frequency */
 56 if (clk >= 0x40) {
 57 if (nv_device(priv)->chipset == 0xaf) {
 58 /* no joke.. seriously.. sigh.. */
 59 return nv_rd32(priv, 0x00471c) * 1000;
 60 }
 61
 62 return nv_device(priv)->crystal;
 63 }
 64
 65 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
 66 if (!ignore_en && !(sctl & 0x00000100))
 67 return 0;
 68
 69 switch (sctl & 0x00003000) {
 70 case 0x00000000:
 71 return nv_device(priv)->crystal;
 72 case 0x00002000:
 73 if (sctl & 0x00000040)
 74 return 108000;
 75 return 100000;
 76 case 0x00003000:
 77 sclk = read_vco(priv, clk);
 /* divider field is bits 16..21; hardware encodes (div - 2) */
 78 sdiv = ((sctl & 0x003f0000) >> 16) + 2;
 79 return (sclk * 2) / sdiv;
 80 default:
 81 return 0;
 82 }
 83}
84
 /* Frequency of the PLL at register 'pll' (coefficients at pll + 4).
  * When the PLL is bypassed (ctrl bit 3 set) the bypass source clock
  * (slot 0x10 + clk) is returned instead; 0 when disabled entirely. */
 85static u32
 86read_pll(struct nva3_clock_priv *priv, int clk, u32 pll)
 87{
 88 u32 ctrl = nv_rd32(priv, pll + 0);
 89 u32 sclk = 0, P = 1, N = 1, M = 1;
 90
 91 if (!(ctrl & 0x00000008)) {
 92 if (ctrl & 0x00000001) {
 93 u32 coef = nv_rd32(priv, pll + 4);
 94 M = (coef & 0x000000ff) >> 0;
 95 N = (coef & 0x0000ff00) >> 8;
 96 P = (coef & 0x003f0000) >> 16;
 97
 98 /* no post-divider on these.. */
 99 if ((pll & 0x00ff00) == 0x00e800)
 100 P = 1;
 101
 102 sclk = read_clk(priv, 0x00 + clk, false);
 103 }
 104 } else {
 105 sclk = read_clk(priv, 0x10 + clk, false);
 106 }
 107
 /* guard against a zero divisor from an unprogrammed coef register */
 108 if (M * P)
 109 return sclk * N / (M * P);
 110 return 0;
 111}
112
 /* nouveau_clock::read - report the current frequency of clock domain
  * 'src', mapping each domain to its PLL or source-clock slot.
  * Returns -EINVAL for domains this chipset does not expose. */
 113static int
 114nva3_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
 115{
 116 struct nva3_clock_priv *priv = (void *)clk;
 117
 118 switch (src) {
 119 case nv_clk_src_crystal:
 120 return nv_device(priv)->crystal;
 121 case nv_clk_src_href:
 122 return 100000;
 123 case nv_clk_src_core:
 124 return read_pll(priv, 0x00, 0x4200);
 125 case nv_clk_src_shader:
 126 return read_pll(priv, 0x01, 0x4220);
 127 case nv_clk_src_mem:
 128 return read_pll(priv, 0x02, 0x4000);
 129 case nv_clk_src_disp:
 130 return read_clk(priv, 0x20, false);
 131 case nv_clk_src_vdec:
 132 return read_clk(priv, 0x21, false);
 133 case nv_clk_src_daemon:
 134 return read_clk(priv, 0x25, false);
 135 default:
 136 nv_error(clk, "invalid clock source %d\n", src);
 137 return -EINVAL;
 138 }
 139}
140
35int 141int
36nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info, 142nva3_clock_info(struct nouveau_clock *clock, int clk, u32 pll, u32 khz,
37 int clk, struct nouveau_pll_vals *pv) 143 struct nva3_clock_info *info)
38{ 144{
39 int ret, N, M, P; 145 struct nouveau_bios *bios = nouveau_bios(clock);
146 struct nva3_clock_priv *priv = (void *)clock;
147 struct nvbios_pll limits;
148 u32 oclk, sclk, sdiv;
149 int P, N, M, diff;
150 int ret;
151
152 info->pll = 0;
153 info->clk = 0;
154
155 switch (khz) {
156 case 27000:
157 info->clk = 0x00000100;
158 return khz;
159 case 100000:
160 info->clk = 0x00002100;
161 return khz;
162 case 108000:
163 info->clk = 0x00002140;
164 return khz;
165 default:
166 sclk = read_vco(priv, clk);
167 sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
168 /* if the clock has a PLL attached, and we can get a within
169 * [-2, 3) MHz of a divider, we'll disable the PLL and use
170 * the divider instead.
171 *
172 * divider can go as low as 2, limited here because NVIDIA
173 * and the VBIOS on my NVA8 seem to prefer using the PLL
174 * for 810MHz - is there a good reason?
175 */
176 if (sdiv > 4) {
177 oclk = (sclk * 2) / sdiv;
178 diff = khz - oclk;
179 if (!pll || (diff >= -2000 && diff < 3000)) {
180 info->clk = (((sdiv - 2) << 16) | 0x00003100);
181 return oclk;
182 }
183 }
184
185 if (!pll)
186 return -ERANGE;
187 break;
188 }
40 189
41 ret = nva3_pll_calc(nv_subdev(clock), info, clk, &N, NULL, &M, &P); 190 ret = nvbios_pll_parse(bios, pll, &limits);
191 if (ret)
192 return ret;
193
194 limits.refclk = read_clk(priv, clk - 0x10, true);
195 if (!limits.refclk)
196 return -EINVAL;
42 197
43 if (ret > 0) { 198 ret = nva3_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P);
44 pv->refclk = info->refclk; 199 if (ret >= 0) {
45 pv->N1 = N; 200 info->clk = nv_rd32(priv, 0x4120 + (clk * 4));
46 pv->M1 = M; 201 info->pll = (P << 16) | (N << 8) | M;
47 pv->log2P = P;
48 } 202 }
203
204 return ret ? ret : -ERANGE;
205}
206
207static int
208calc_clk(struct nva3_clock_priv *priv, struct nouveau_cstate *cstate,
209 int clk, u32 pll, int idx)
210{
211 int ret = nva3_clock_info(&priv->base, clk, pll, cstate->domain[idx],
212 &priv->eng[idx]);
213 if (ret >= 0)
214 return 0;
49 return ret; 215 return ret;
50} 216}
51 217
 /* Program one clock domain from priv->eng[idx]: engage the PLL with the
  * precomputed coefficients when info->pll is set, otherwise switch to
  * the bypass/divider path using info->clk.  The write order (enable,
  * wait on 0x20000 — presumably PLL lock — then de-mux the old source)
  * appears deliberate; do not reorder. */
 218static void
 219prog_pll(struct nva3_clock_priv *priv, int clk, u32 pll, int idx)
 220{
 221 struct nva3_clock_info *info = &priv->eng[idx];
 222 const u32 src0 = 0x004120 + (clk * 4);
 223 const u32 src1 = 0x004160 + (clk * 4);
 224 const u32 ctrl = pll + 0;
 225 const u32 coef = pll + 4;
 226
 227 if (info->pll) {
 228 nv_mask(priv, src0, 0x00000101, 0x00000101);
 229 nv_wr32(priv, coef, info->pll);
 230 nv_mask(priv, ctrl, 0x00000015, 0x00000015);
 231 nv_mask(priv, ctrl, 0x00000010, 0x00000000);
 232 nv_wait(priv, ctrl, 0x00020000, 0x00020000);
 233 nv_mask(priv, ctrl, 0x00000010, 0x00000010);
 234 nv_mask(priv, ctrl, 0x00000008, 0x00000000);
 235 nv_mask(priv, src1, 0x00000100, 0x00000000);
 236 nv_mask(priv, src1, 0x00000001, 0x00000000);
 237 } else {
 238 nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk);
 239 nv_mask(priv, ctrl, 0x00000018, 0x00000018);
 240 udelay(20);
 241 nv_mask(priv, ctrl, 0x00000001, 0x00000000);
 242 nv_mask(priv, src0, 0x00000100, 0x00000000);
 243 nv_mask(priv, src0, 0x00000001, 0x00000000);
 244 }
 245}
246
 /* Divider-only domains: write the precomputed source/divider field
  * (info->clk) into the slot's 0x4120 control register and enable it. */
 247static void
 248prog_clk(struct nva3_clock_priv *priv, int clk, int idx)
 249{
 250 struct nva3_clock_info *info = &priv->eng[idx];
 251 nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk);
 252}
253
 /* Precompute PLL/divider settings for the core, shader, disp and vdec
  * domains into priv->eng[]; the register writes happen later in
  * nva3_clock_prog().  Stops at the first domain that fails. */
 254static int
 255nva3_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
 256{
 257 struct nva3_clock_priv *priv = (void *)clk;
 258 int ret;
 259
 260 if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
 261 (ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
 262 (ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
 263 (ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec)))
 264 return ret;
 265
 266 return 0;
 267}
268
 /* Apply the settings computed by nva3_clock_calc(): PLL-capable domains
  * via prog_pll(), divider-only domains via prog_clk(). */
 269static int
 270nva3_clock_prog(struct nouveau_clock *clk)
 271{
 272 struct nva3_clock_priv *priv = (void *)clk;
 273 prog_pll(priv, 0x00, 0x004200, nv_clk_src_core);
 274 prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader);
 275 prog_clk(priv, 0x20, nv_clk_src_disp);
 276 prog_clk(priv, 0x21, nv_clk_src_vdec);
 277 return 0;
 278}
279
 /* No intermediate state to discard for this implementation. */
 280static void
 281nva3_clock_tidy(struct nouveau_clock *clk)
 282{
 283}
284
 /* Clock-domain table for nva3: entries with a name ("core", "shader",
  * "memory") are user-visible.  NOTE(review): exact field semantics
  * (mux id, flags, divisor) come from struct nouveau_clocks — confirm
  * against the clock subdev header. */
 285static struct nouveau_clocks
 286nva3_domain[] = {
 287 { nv_clk_src_crystal, 0xff },
 288 { nv_clk_src_href , 0xff },
 289 { nv_clk_src_core , 0x00, 0, "core", 1000 },
 290 { nv_clk_src_shader , 0x01, 0, "shader", 1000 },
 291 { nv_clk_src_mem , 0x02, 0, "memory", 1000 },
 292 { nv_clk_src_vdec , 0x03 },
 293 { nv_clk_src_disp , 0x04 },
 294 { nv_clk_src_max }
 295};
52 296
53static int 297static int
54nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 298nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -58,12 +302,15 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
58 struct nva3_clock_priv *priv; 302 struct nva3_clock_priv *priv;
59 int ret; 303 int ret;
60 304
61 ret = nouveau_clock_create(parent, engine, oclass, &priv); 305 ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, &priv);
62 *pobject = nv_object(priv); 306 *pobject = nv_object(priv);
63 if (ret) 307 if (ret)
64 return ret; 308 return ret;
65 309
66 priv->base.pll_calc = nva3_clock_pll_calc; 310 priv->base.read = nva3_clock_read;
311 priv->base.calc = nva3_clock_calc;
312 priv->base.prog = nva3_clock_prog;
313 priv->base.tidy = nva3_clock_tidy;
67 return 0; 314 return 0;
68} 315}
69 316
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
new file mode 100644
index 000000000000..6229a509b42e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
@@ -0,0 +1,14 @@
1#ifndef __NVKM_CLK_NVA3_H__
2#define __NVKM_CLK_NVA3_H__
3
4#include <subdev/clock.h>
5
6struct nva3_clock_info {
7 u32 clk;
8 u32 pll;
9};
10
11int nva3_clock_info(struct nouveau_clock *, int, u32, u32,
12 struct nva3_clock_info *);
13
14#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index 439d81c26130..c3105720ed24 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -25,11 +25,408 @@
25#include <subdev/clock.h> 25#include <subdev/clock.h>
26#include <subdev/bios.h> 26#include <subdev/bios.h>
27#include <subdev/bios/pll.h> 27#include <subdev/bios/pll.h>
28#include <subdev/timer.h>
28 29
29#include "pll.h" 30#include "pll.h"
30 31
32struct nvc0_clock_info {
33 u32 freq;
34 u32 ssel;
35 u32 mdiv;
36 u32 dsrc;
37 u32 ddiv;
38 u32 coef;
39};
40
31struct nvc0_clock_priv { 41struct nvc0_clock_priv {
32 struct nouveau_clock base; 42 struct nouveau_clock base;
43 struct nvc0_clock_info eng[16];
44};
45
46static u32 read_div(struct nvc0_clock_priv *, int, u32, u32);
47
 /* VCO feeding a divider source: sppll0 or sppll1, selected by bit 8 of
  * the divider-source register at 'dsrc'. */
 48static u32
 49read_vco(struct nvc0_clock_priv *priv, u32 dsrc)
 50{
 51 struct nouveau_clock *clk = &priv->base;
 52 u32 ssrc = nv_rd32(priv, dsrc);
 53 if (!(ssrc & 0x00000100))
 54 return clk->read(clk, nv_clk_src_sppll0);
 55 return clk->read(clk, nv_clk_src_sppll1);
 56}
57
 /* Frequency of the PLL at 'pll' from its coef register (P/N/M fields).
  * The reference clock depends on which PLL it is: crystal for the
  * 0xe8xx pair (no post-divider), the mpll sources for 0x1320xx, or a
  * per-domain divider for the 0x137xxx engine PLLs.  Returns 0 when the
  * PLL is disabled or the address is unrecognised. */
 58static u32
 59read_pll(struct nvc0_clock_priv *priv, u32 pll)
 60{
 61 struct nouveau_clock *clk = &priv->base;
 62 u32 ctrl = nv_rd32(priv, pll + 0x00);
 63 u32 coef = nv_rd32(priv, pll + 0x04);
 64 u32 P = (coef & 0x003f0000) >> 16;
 65 u32 N = (coef & 0x0000ff00) >> 8;
 66 u32 M = (coef & 0x000000ff) >> 0;
 67 u32 sclk;
 68
 69 if (!(ctrl & 0x00000001))
 70 return 0;
 71
 72 switch (pll) {
 73 case 0x00e800:
 74 case 0x00e820:
 75 sclk = nv_device(priv)->crystal;
 76 P = 1;
 77 break;
 78 case 0x132000:
 79 sclk = clk->read(clk, nv_clk_src_mpllsrc);
 80 break;
 81 case 0x132020:
 82 sclk = clk->read(clk, nv_clk_src_mpllsrcref);
 83 break;
 84 case 0x137000:
 85 case 0x137020:
 86 case 0x137040:
 87 case 0x1370e0:
 88 sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
 89 break;
 90 default:
 91 return 0;
 92 }
 93
 94 return sclk * N / M / P;
 95}
96
 /* Frequency selected by a divider source/control register pair at
  * (dsrc, dctl) + doff*4: crystal or 108MHz, fixed 100MHz, or the VCO —
  * optionally divided when bit 31 of the control register is set. */
 97static u32
 98read_div(struct nvc0_clock_priv *priv, int doff, u32 dsrc, u32 dctl)
 99{
 100 u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
 101 u32 sctl = nv_rd32(priv, dctl + (doff * 4));
 102
 103 switch (ssrc & 0x00000003) {
 104 case 0:
 105 if ((ssrc & 0x00030000) != 0x00030000)
 106 return nv_device(priv)->crystal;
 107 return 108000;
 108 case 2:
 109 return 100000;
 110 case 3:
 111 if (sctl & 0x80000000) {
 112 u32 sclk = read_vco(priv, dsrc + (doff * 4));
 /* hardware encodes (div - 2) in the low 6 bits */
 113 u32 sdiv = (sctl & 0x0000003f) + 2;
 114 return (sclk * 2) / sdiv;
 115 }
 116
 117 return read_vco(priv, dsrc + (doff * 4));
 118 default:
 119 return 0;
 120 }
 121}
122
 /* Frequency of engine clock 'clk': the PLL path when this clock's bit
  * in the 0x137100 select register is set, otherwise the divider path.
  * A further post-divider applies when bit 31 of the 0x137250 control
  * register is set. */
 123static u32
 124read_clk(struct nvc0_clock_priv *priv, int clk)
 125{
 126 u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
 127 u32 ssel = nv_rd32(priv, 0x137100);
 128 u32 sclk, sdiv;
 129
 130 if (ssel & (1 << clk)) {
 131 if (clk < 7)
 132 sclk = read_pll(priv, 0x137000 + (clk * 0x20));
 133 else
 134 sclk = read_pll(priv, 0x1370e0);
 135 sdiv = ((sctl & 0x00003f00) >> 8) + 2;
 136 } else {
 137 sclk = read_div(priv, clk, 0x137160, 0x1371d0);
 138 sdiv = ((sctl & 0x0000003f) >> 0) + 2;
 139 }
 140
 141 if (sctl & 0x80000000)
 142 return (sclk * 2) / sdiv;
 143
 144 return sclk;
 145}
146
 /* nouveau_clock::read - report the current frequency of domain 'src'
  * on fermi, delegating to the PLL / divider / engine-clock readers
  * above.  The memory domain follows either mpll or the mdiv path
  * depending on bit 1 of 0x1373f0.  Returns -EINVAL for unknown
  * domains. */
 147static int
 148nvc0_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
 149{
 150 struct nouveau_device *device = nv_device(clk);
 151 struct nvc0_clock_priv *priv = (void *)clk;
 152
 153 switch (src) {
 154 case nv_clk_src_crystal:
 155 return device->crystal;
 156 case nv_clk_src_href:
 157 return 100000;
 158 case nv_clk_src_sppll0:
 159 return read_pll(priv, 0x00e800);
 160 case nv_clk_src_sppll1:
 161 return read_pll(priv, 0x00e820);
 162
 163 case nv_clk_src_mpllsrcref:
 164 return read_div(priv, 0, 0x137320, 0x137330);
 165 case nv_clk_src_mpllsrc:
 166 return read_pll(priv, 0x132020);
 167 case nv_clk_src_mpll:
 168 return read_pll(priv, 0x132000);
 169 case nv_clk_src_mdiv:
 170 return read_div(priv, 0, 0x137300, 0x137310);
 171 case nv_clk_src_mem:
 172 if (nv_rd32(priv, 0x1373f0) & 0x00000002)
 173 return clk->read(clk, nv_clk_src_mpll);
 174 return clk->read(clk, nv_clk_src_mdiv);
 175
 176 case nv_clk_src_gpc:
 177 return read_clk(priv, 0x00);
 178 case nv_clk_src_rop:
 179 return read_clk(priv, 0x01);
 180 case nv_clk_src_hubk07:
 181 return read_clk(priv, 0x02);
 182 case nv_clk_src_hubk06:
 183 return read_clk(priv, 0x07);
 184 case nv_clk_src_hubk01:
 185 return read_clk(priv, 0x08);
 186 case nv_clk_src_copy:
 187 return read_clk(priv, 0x09);
 188 case nv_clk_src_daemon:
 189 return read_clk(priv, 0x0c);
 190 case nv_clk_src_vdec:
 191 return read_clk(priv, 0x0e);
 192 default:
 193 nv_error(clk, "invalid clock source %d\n", src);
 194 return -EINVAL;
 195 }
 196}
197
 /* Closest achievable frequency to 'freq' using the (ref * 2) / div
  * divider, div clamped to 2..65.  Stores the hardware field encoding
  * (div - 2) in *ddiv and returns the achieved frequency. */
 198static u32
 199calc_div(struct nvc0_clock_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
 200{
 201 u32 div = min((ref * 2) / freq, (u32)65);
 202 if (div < 2)
 203 div = 2;
 204
 205 *ddiv = div - 2;
 206 return (ref * 2) / div;
 207}
208
 /* Choose a divider source for 'freq': one of the fixed frequencies
  * (27/108/100 MHz) when it matches exactly, otherwise the VCO — with a
  * divider computed for clocks that have one (clk < 7).  The register
  * field values are returned through *dsrc / *ddiv. */
 209static u32
 210calc_src(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
 211{
 212 u32 sclk;
 213
 214 /* use one of the fixed frequencies if possible */
 215 *ddiv = 0x00000000;
 216 switch (freq) {
 217 case 27000:
 218 case 108000:
 219 *dsrc = 0x00000000;
 220 if (freq == 108000)
 221 *dsrc |= 0x00030000;
 222 return freq;
 223 case 100000:
 224 *dsrc = 0x00000002;
 225 return freq;
 226 default:
 227 *dsrc = 0x00000003;
 228 break;
 229 }
 230
 231 /* otherwise, calculate the closest divider */
 232 sclk = read_vco(priv, 0x137160 + (clk * 4));
 233 if (clk < 7)
 234 sclk = calc_div(priv, clk, sclk, freq, ddiv);
 235 return sclk;
 236}
237
 /* Compute PLL coefficients for engine clock 'clk' at 'freq' kHz using
  * the VBIOS PLL limits and the domain's refclk divider.  Packs
  * P/N/M into *coef and returns the achieved frequency, or 0 on any
  * failure (so callers treat it as "no PLL candidate"). */
 238static u32
 239calc_pll(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *coef)
 240{
 241 struct nouveau_bios *bios = nouveau_bios(priv);
 242 struct nvbios_pll limits;
 243 int N, M, P, ret;
 244
 245 ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
 246 if (ret)
 247 return 0;
 248
 249 limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
 250 if (!limits.refclk)
 251 return 0;
 252
 253 ret = nva3_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
 254 if (ret <= 0)
 255 return 0;
 256
 257 *coef = (P << 16) | (N << 8) | M;
 258 return ret;
 259}
260
 /* Decide between the divider-only and PLL paths for one engine clock
  * and record the winning register values in priv->eng[clk], to be
  * applied later by nvc0_clock_prog().  A zero target frequency means
  * "leave this domain alone". */
 261static int
 262calc_clk(struct nvc0_clock_priv *priv,
 263 struct nouveau_cstate *cstate, int clk, int dom)
 264{
 265 struct nvc0_clock_info *info = &priv->eng[clk];
 266 u32 freq = cstate->domain[dom];
 267 u32 src0, div0, div1D, div1P = 0;
 268 u32 clk0, clk1 = 0;
 269
 270 /* invalid clock domain */
 271 if (!freq)
 272 return 0;
 273
 274 /* first possible path, using only dividers */
 275 clk0 = calc_src(priv, clk, freq, &src0, &div0);
 276 clk0 = calc_div(priv, clk, clk0, freq, &div1D);
 277
 278 /* see if we can get any closer using PLLs */
 /* NOTE(review): 0x00004387 looks like a bitmask of clocks that have a
  * PLL (or, for clk > 7, can divide from hubk06) — confirm */
 279 if (clk0 != freq && (0x00004387 & (1 << clk))) {
 280 if (clk <= 7)
 281 clk1 = calc_pll(priv, clk, freq, &info->coef);
 282 else
 283 clk1 = cstate->domain[nv_clk_src_hubk06];
 284 clk1 = calc_div(priv, clk, clk1, freq, &div1P);
 285 }
 286
 287 /* select the method which gets closest to target freq */
 288 if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
 289 info->dsrc = src0;
 290 if (div0) {
 291 info->ddiv |= 0x80000000;
 292 info->ddiv |= div0 << 8;
 293 info->ddiv |= div0;
 294 }
 295 if (div1D) {
 296 info->mdiv |= 0x80000000;
 297 info->mdiv |= div1D;
 298 }
 299 info->ssel = info->coef = 0;
 300 info->freq = clk0;
 301 } else {
 302 if (div1P) {
 303 info->mdiv |= 0x80000000;
 304 info->mdiv |= div1P << 8;
 305 }
 306 info->ssel = (1 << clk);
 307 info->freq = clk1;
 308 }
 309
 310 return 0;
 311}
312
 /* Precompute settings for every supported engine clock domain; stops
  * at the first domain that fails. */
 313static int
 314nvc0_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
 315{
 316 struct nvc0_clock_priv *priv = (void *)clk;
 317 int ret;
 318
 319 if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
 320 (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
 321 (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
 322 (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
 323 (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
 324 (ret = calc_clk(priv, cstate, 0x09, nv_clk_src_copy)) ||
 325 (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
 326 (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
 327 return ret;
 328
 329 return 0;
 330}
331
 /* Stage 0: pre-program the divider registers for clocks that will run
  * off the divider path (no PLL selection). */
 332static void
 333nvc0_clock_prog_0(struct nvc0_clock_priv *priv, int clk)
 334{
 335 struct nvc0_clock_info *info = &priv->eng[clk];
 336 if (clk < 7 && !info->ssel) {
 337 nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
 338 nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
 339 }
 340}
341
 /* Stage 1: switch the clock onto the divider path (clear its 0x137100
  * select bit) and wait for the switch to take effect. */
 342static void
 343nvc0_clock_prog_1(struct nvc0_clock_priv *priv, int clk)
 344{
 345 nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
 346 nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
 347}
348
 /* Stage 2: with the clock safely on the divider path, disable the PLL
  * and, when new coefficients exist, reprogram and re-enable it —
  * waiting on 0x20000 (presumably the lock bit) before proceeding. */
 349static void
 350nvc0_clock_prog_2(struct nvc0_clock_priv *priv, int clk)
 351{
 352 struct nvc0_clock_info *info = &priv->eng[clk];
 353 const u32 addr = 0x137000 + (clk * 0x20);
 354 if (clk <= 7) {
 355 nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
 356 nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
 357 if (info->coef) {
 358 nv_wr32(priv, addr + 0x04, info->coef);
 359 nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
 360 nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
 361 nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
 362 }
 363 }
 364}
365
 /* Stage 3: for clocks that chose the PLL path, switch them back onto
  * the PLL and wait for the select bit to stick. */
 366static void
 367nvc0_clock_prog_3(struct nvc0_clock_priv *priv, int clk)
 368{
 369 struct nvc0_clock_info *info = &priv->eng[clk];
 370 if (info->ssel) {
 371 nv_mask(priv, 0x137100, (1 << clk), info->ssel);
 372 nv_wait(priv, 0x137100, (1 << clk), info->ssel);
 373 }
 374}
375
 /* Stage 4: apply the final post-divider field for this clock. */
 376static void
 377nvc0_clock_prog_4(struct nvc0_clock_priv *priv, int clk)
 378{
 379 struct nvc0_clock_info *info = &priv->eng[clk];
 380 nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
 381}
382
 /* Apply the settings computed by nvc0_clock_calc() in five ordered
  * stages across all engine clocks.  Entries with freq == 0 were never
  * touched by calc and are skipped. */
 383static int
 384nvc0_clock_prog(struct nouveau_clock *clk)
 385{
 386 struct nvc0_clock_priv *priv = (void *)clk;
 387 struct {
 388 void (*exec)(struct nvc0_clock_priv *, int);
 389 } stage[] = {
 390 { nvc0_clock_prog_0 }, /* div programming */
 391 { nvc0_clock_prog_1 }, /* select div mode */
 392 { nvc0_clock_prog_2 }, /* (maybe) program pll */
 393 { nvc0_clock_prog_3 }, /* (maybe) select pll mode */
 394 { nvc0_clock_prog_4 }, /* final divider */
 395 };
 396 int i, j;
 397
 398 for (i = 0; i < ARRAY_SIZE(stage); i++) {
 399 for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
 400 if (!priv->eng[j].freq)
 401 continue;
 402 stage[i].exec(priv, j);
 403 }
 404 }
 405
 406 return 0;
 407}
408
 /* Discard any precomputed-but-unprogrammed settings. */
 409static void
 410nvc0_clock_tidy(struct nouveau_clock *clk)
 411{
 412 struct nvc0_clock_priv *priv = (void *)clk;
 413 memset(priv->eng, 0x00, sizeof(priv->eng));
 414}
415
416static struct nouveau_clocks
417nvc0_domain[] = {
418 { nv_clk_src_crystal, 0xff },
419 { nv_clk_src_href , 0xff },
420 { nv_clk_src_hubk06 , 0x00 },
421 { nv_clk_src_hubk01 , 0x01 },
422 { nv_clk_src_copy , 0x02 },
423 { nv_clk_src_gpc , 0x03, 0, "core", 2000 },
424 { nv_clk_src_rop , 0x04 },
425 { nv_clk_src_mem , 0x05, 0, "memory", 1000 },
426 { nv_clk_src_vdec , 0x06 },
427 { nv_clk_src_daemon , 0x0a },
428 { nv_clk_src_hubk07 , 0x0b },
429 { nv_clk_src_max }
33}; 430};
34 431
35static int 432static int
@@ -40,12 +437,15 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
40 struct nvc0_clock_priv *priv; 437 struct nvc0_clock_priv *priv;
41 int ret; 438 int ret;
42 439
43 ret = nouveau_clock_create(parent, engine, oclass, &priv); 440 ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, &priv);
44 *pobject = nv_object(priv); 441 *pobject = nv_object(priv);
45 if (ret) 442 if (ret)
46 return ret; 443 return ret;
47 444
48 priv->base.pll_calc = nva3_clock_pll_calc; 445 priv->base.read = nvc0_clock_read;
446 priv->base.calc = nvc0_clock_calc;
447 priv->base.prog = nvc0_clock_prog;
448 priv->base.tidy = nvc0_clock_tidy;
49 return 0; 449 return 0;
50} 450}
51 451
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
new file mode 100644
index 000000000000..4c62e84b96f5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -0,0 +1,497 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/clock.h>
26#include <subdev/timer.h>
27#include <subdev/bios.h>
28#include <subdev/bios/pll.h>
29
30#include "pll.h"
31
32struct nve0_clock_info {
33 u32 freq;
34 u32 ssel;
35 u32 mdiv;
36 u32 dsrc;
37 u32 ddiv;
38 u32 coef;
39};
40
41struct nve0_clock_priv {
42 struct nouveau_clock base;
43 struct nve0_clock_info eng[16];
44};
45
46static u32 read_div(struct nve0_clock_priv *, int, u32, u32);
47static u32 read_pll(struct nve0_clock_priv *, u32);
48
49static u32
50read_vco(struct nve0_clock_priv *priv, u32 dsrc)
51{
52 u32 ssrc = nv_rd32(priv, dsrc);
53 if (!(ssrc & 0x00000100))
54 return read_pll(priv, 0x00e800);
55 return read_pll(priv, 0x00e820);
56}
57
58static u32
59read_pll(struct nve0_clock_priv *priv, u32 pll)
60{
61 u32 ctrl = nv_rd32(priv, pll + 0x00);
62 u32 coef = nv_rd32(priv, pll + 0x04);
63 u32 P = (coef & 0x003f0000) >> 16;
64 u32 N = (coef & 0x0000ff00) >> 8;
65 u32 M = (coef & 0x000000ff) >> 0;
66 u32 sclk;
67 u16 fN = 0xf000;
68
69 if (!(ctrl & 0x00000001))
70 return 0;
71
72 switch (pll) {
73 case 0x00e800:
74 case 0x00e820:
75 sclk = nv_device(priv)->crystal;
76 P = 1;
77 break;
78 case 0x132000:
79 sclk = read_pll(priv, 0x132020);
80 P = (coef & 0x10000000) ? 2 : 1;
81 break;
82 case 0x132020:
83 sclk = read_div(priv, 0, 0x137320, 0x137330);
84 fN = nv_rd32(priv, pll + 0x10) >> 16;
85 break;
86 case 0x137000:
87 case 0x137020:
88 case 0x137040:
89 case 0x1370e0:
90 sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
91 break;
92 default:
93 return 0;
94 }
95
96 if (P == 0)
97 P = 1;
98
99 sclk = (sclk * N) + (((u16)(fN + 4096) * sclk) >> 13);
100 return sclk / (M * P);
101}
102
103static u32
104read_div(struct nve0_clock_priv *priv, int doff, u32 dsrc, u32 dctl)
105{
106 u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
107 u32 sctl = nv_rd32(priv, dctl + (doff * 4));
108
109 switch (ssrc & 0x00000003) {
110 case 0:
111 if ((ssrc & 0x00030000) != 0x00030000)
112 return nv_device(priv)->crystal;
113 return 108000;
114 case 2:
115 return 100000;
116 case 3:
117 if (sctl & 0x80000000) {
118 u32 sclk = read_vco(priv, dsrc + (doff * 4));
119 u32 sdiv = (sctl & 0x0000003f) + 2;
120 return (sclk * 2) / sdiv;
121 }
122
123 return read_vco(priv, dsrc + (doff * 4));
124 default:
125 return 0;
126 }
127}
128
129static u32
130read_mem(struct nve0_clock_priv *priv)
131{
132 switch (nv_rd32(priv, 0x1373f4) & 0x0000000f) {
133 case 1: return read_pll(priv, 0x132020);
134 case 2: return read_pll(priv, 0x132000);
135 default:
136 return 0;
137 }
138}
139
140static u32
141read_clk(struct nve0_clock_priv *priv, int clk)
142{
143 u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
144 u32 sclk, sdiv;
145
146 if (clk < 7) {
147 u32 ssel = nv_rd32(priv, 0x137100);
148 if (ssel & (1 << clk)) {
149 sclk = read_pll(priv, 0x137000 + (clk * 0x20));
150 sdiv = 1;
151 } else {
152 sclk = read_div(priv, clk, 0x137160, 0x1371d0);
153 sdiv = 0;
154 }
155 } else {
156 u32 ssrc = nv_rd32(priv, 0x137160 + (clk * 0x04));
157 if ((ssrc & 0x00000003) == 0x00000003) {
158 sclk = read_div(priv, clk, 0x137160, 0x1371d0);
159 if (ssrc & 0x00000100) {
160 if (ssrc & 0x40000000)
161 sclk = read_pll(priv, 0x1370e0);
162 sdiv = 1;
163 } else {
164 sdiv = 0;
165 }
166 } else {
167 sclk = read_div(priv, clk, 0x137160, 0x1371d0);
168 sdiv = 0;
169 }
170 }
171
172 if (sctl & 0x80000000) {
173 if (sdiv)
174 sdiv = ((sctl & 0x00003f00) >> 8) + 2;
175 else
176 sdiv = ((sctl & 0x0000003f) >> 0) + 2;
177 return (sclk * 2) / sdiv;
178 }
179
180 return sclk;
181}
182
183static int
184nve0_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
185{
186 struct nouveau_device *device = nv_device(clk);
187 struct nve0_clock_priv *priv = (void *)clk;
188
189 switch (src) {
190 case nv_clk_src_crystal:
191 return device->crystal;
192 case nv_clk_src_href:
193 return 100000;
194 case nv_clk_src_mem:
195 return read_mem(priv);
196 case nv_clk_src_gpc:
197 return read_clk(priv, 0x00);
198 case nv_clk_src_rop:
199 return read_clk(priv, 0x01);
200 case nv_clk_src_hubk07:
201 return read_clk(priv, 0x02);
202 case nv_clk_src_hubk06:
203 return read_clk(priv, 0x07);
204 case nv_clk_src_hubk01:
205 return read_clk(priv, 0x08);
206 case nv_clk_src_daemon:
207 return read_clk(priv, 0x0c);
208 case nv_clk_src_vdec:
209 return read_clk(priv, 0x0e);
210 default:
211 nv_error(clk, "invalid clock source %d\n", src);
212 return -EINVAL;
213 }
214}
215
216static u32
217calc_div(struct nve0_clock_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
218{
219 u32 div = min((ref * 2) / freq, (u32)65);
220 if (div < 2)
221 div = 2;
222
223 *ddiv = div - 2;
224 return (ref * 2) / div;
225}
226
227static u32
228calc_src(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
229{
230 u32 sclk;
231
232 /* use one of the fixed frequencies if possible */
233 *ddiv = 0x00000000;
234 switch (freq) {
235 case 27000:
236 case 108000:
237 *dsrc = 0x00000000;
238 if (freq == 108000)
239 *dsrc |= 0x00030000;
240 return freq;
241 case 100000:
242 *dsrc = 0x00000002;
243 return freq;
244 default:
245 *dsrc = 0x00000003;
246 break;
247 }
248
249 /* otherwise, calculate the closest divider */
250 sclk = read_vco(priv, 0x137160 + (clk * 4));
251 if (clk < 7)
252 sclk = calc_div(priv, clk, sclk, freq, ddiv);
253 return sclk;
254}
255
256static u32
257calc_pll(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *coef)
258{
259 struct nouveau_bios *bios = nouveau_bios(priv);
260 struct nvbios_pll limits;
261 int N, M, P, ret;
262
263 ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
264 if (ret)
265 return 0;
266
267 limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
268 if (!limits.refclk)
269 return 0;
270
271 ret = nva3_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
272 if (ret <= 0)
273 return 0;
274
275 *coef = (P << 16) | (N << 8) | M;
276 return ret;
277}
278
279static int
280calc_clk(struct nve0_clock_priv *priv,
281 struct nouveau_cstate *cstate, int clk, int dom)
282{
283 struct nve0_clock_info *info = &priv->eng[clk];
284 u32 freq = cstate->domain[dom];
285 u32 src0, div0, div1D, div1P = 0;
286 u32 clk0, clk1 = 0;
287
288 /* invalid clock domain */
289 if (!freq)
290 return 0;
291
292 /* first possible path, using only dividers */
293 clk0 = calc_src(priv, clk, freq, &src0, &div0);
294 clk0 = calc_div(priv, clk, clk0, freq, &div1D);
295
296 /* see if we can get any closer using PLLs */
297 if (clk0 != freq && (0x0000ff87 & (1 << clk))) {
298 if (clk <= 7)
299 clk1 = calc_pll(priv, clk, freq, &info->coef);
300 else
301 clk1 = cstate->domain[nv_clk_src_hubk06];
302 clk1 = calc_div(priv, clk, clk1, freq, &div1P);
303 }
304
305 /* select the method which gets closest to target freq */
306 if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
307 info->dsrc = src0;
308 if (div0) {
309 info->ddiv |= 0x80000000;
310 info->ddiv |= div0 << 8;
311 info->ddiv |= div0;
312 }
313 if (div1D) {
314 info->mdiv |= 0x80000000;
315 info->mdiv |= div1D;
316 }
317 info->ssel = 0;
318 info->freq = clk0;
319 } else {
320 if (div1P) {
321 info->mdiv |= 0x80000000;
322 info->mdiv |= div1P << 8;
323 }
324 info->ssel = (1 << clk);
325 info->dsrc = 0x40000100;
326 info->freq = clk1;
327 }
328
329 return 0;
330}
331
332static int
333nve0_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
334{
335 struct nve0_clock_priv *priv = (void *)clk;
336 int ret;
337
338 if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
339 (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
340 (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
341 (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
342 (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
343 (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
344 (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
345 return ret;
346
347 return 0;
348}
349
350static void
351nve0_clock_prog_0(struct nve0_clock_priv *priv, int clk)
352{
353 struct nve0_clock_info *info = &priv->eng[clk];
354 if (!info->ssel) {
355 nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
356 nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
357 }
358}
359
360static void
361nve0_clock_prog_1_0(struct nve0_clock_priv *priv, int clk)
362{
363 nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
364 nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
365}
366
367static void
368nve0_clock_prog_1_1(struct nve0_clock_priv *priv, int clk)
369{
370 nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000000);
371}
372
373static void
374nve0_clock_prog_2(struct nve0_clock_priv *priv, int clk)
375{
376 struct nve0_clock_info *info = &priv->eng[clk];
377 const u32 addr = 0x137000 + (clk * 0x20);
378 nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
379 nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
380 if (info->coef) {
381 nv_wr32(priv, addr + 0x04, info->coef);
382 nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
383 nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
384 nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
385 }
386}
387
388static void
389nve0_clock_prog_3(struct nve0_clock_priv *priv, int clk)
390{
391 struct nve0_clock_info *info = &priv->eng[clk];
392 nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
393}
394
395static void
396nve0_clock_prog_4_0(struct nve0_clock_priv *priv, int clk)
397{
398 struct nve0_clock_info *info = &priv->eng[clk];
399 if (info->ssel) {
400 nv_mask(priv, 0x137100, (1 << clk), info->ssel);
401 nv_wait(priv, 0x137100, (1 << clk), info->ssel);
402 }
403}
404
405static void
406nve0_clock_prog_4_1(struct nve0_clock_priv *priv, int clk)
407{
408 struct nve0_clock_info *info = &priv->eng[clk];
409 if (info->ssel) {
410 nv_mask(priv, 0x137160 + (clk * 0x04), 0x40000000, 0x40000000);
411 nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000100);
412 }
413}
414
415static int
416nve0_clock_prog(struct nouveau_clock *clk)
417{
418 struct nve0_clock_priv *priv = (void *)clk;
419 struct {
420 u32 mask;
421 void (*exec)(struct nve0_clock_priv *, int);
422 } stage[] = {
423 { 0x007f, nve0_clock_prog_0 }, /* div programming */
424 { 0x007f, nve0_clock_prog_1_0 }, /* select div mode */
425 { 0xff80, nve0_clock_prog_1_1 },
426 { 0x00ff, nve0_clock_prog_2 }, /* (maybe) program pll */
427 { 0xff80, nve0_clock_prog_3 }, /* final divider */
428 { 0x007f, nve0_clock_prog_4_0 }, /* (maybe) select pll mode */
429 { 0xff80, nve0_clock_prog_4_1 },
430 };
431 int i, j;
432
433 for (i = 0; i < ARRAY_SIZE(stage); i++) {
434 for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
435 if (!(stage[i].mask & (1 << j)))
436 continue;
437 if (!priv->eng[j].freq)
438 continue;
439 stage[i].exec(priv, j);
440 }
441 }
442
443 return 0;
444}
445
446static void
447nve0_clock_tidy(struct nouveau_clock *clk)
448{
449 struct nve0_clock_priv *priv = (void *)clk;
450 memset(priv->eng, 0x00, sizeof(priv->eng));
451}
452
453static struct nouveau_clocks
454nve0_domain[] = {
455 { nv_clk_src_crystal, 0xff },
456 { nv_clk_src_href , 0xff },
457 { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
458 { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
459 { nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
460 { nv_clk_src_mem , 0x03, 0, "memory", 1000 },
461 { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
462 { nv_clk_src_hubk01 , 0x05 },
463 { nv_clk_src_vdec , 0x06 },
464 { nv_clk_src_daemon , 0x07 },
465 { nv_clk_src_max }
466};
467
468static int
469nve0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
470 struct nouveau_oclass *oclass, void *data, u32 size,
471 struct nouveau_object **pobject)
472{
473 struct nve0_clock_priv *priv;
474 int ret;
475
476 ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, &priv);
477 *pobject = nv_object(priv);
478 if (ret)
479 return ret;
480
481 priv->base.read = nve0_clock_read;
482 priv->base.calc = nve0_clock_calc;
483 priv->base.prog = nve0_clock_prog;
484 priv->base.tidy = nve0_clock_tidy;
485 return 0;
486}
487
488struct nouveau_oclass
489nve0_clock_oclass = {
490 .handle = NV_SUBDEV(CLOCK, 0xe0),
491 .ofuncs = &(struct nouveau_ofuncs) {
492 .ctor = nve0_clock_ctor,
493 .dtor = _nouveau_clock_dtor,
494 .init = _nouveau_clock_init,
495 .fini = _nouveau_clock_fini,
496 },
497};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
index cf1ed0dc9bc9..b47d543ab2e3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
@@ -38,7 +38,7 @@ getMNP_single(struct nouveau_subdev *subdev, struct nvbios_pll *info, int clk,
38 * "clk" parameter in kHz 38 * "clk" parameter in kHz
39 * returns calculated clock 39 * returns calculated clock
40 */ 40 */
41 int cv = nouveau_bios(subdev)->version.chip; 41 struct nouveau_bios *bios = nouveau_bios(subdev);
42 int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq; 42 int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
43 int minM = info->vco1.min_m, maxM = info->vco1.max_m; 43 int minM = info->vco1.min_m, maxM = info->vco1.max_m;
44 int minN = info->vco1.min_n, maxN = info->vco1.max_n; 44 int minN = info->vco1.min_n, maxN = info->vco1.max_n;
@@ -54,18 +54,21 @@ getMNP_single(struct nouveau_subdev *subdev, struct nvbios_pll *info, int clk,
54 54
55 /* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */ 55 /* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
56 /* possibly correlated with introduction of 27MHz crystal */ 56 /* possibly correlated with introduction of 27MHz crystal */
57 if (cv < 0x17 || cv == 0x1a || cv == 0x20) { 57 if (bios->version.major < 0x60) {
58 if (clk > 250000) 58 int cv = bios->version.chip;
59 maxM = 6; 59 if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
60 if (clk > 340000) 60 if (clk > 250000)
61 maxM = 2; 61 maxM = 6;
62 } else if (cv < 0x40) { 62 if (clk > 340000)
63 if (clk > 150000) 63 maxM = 2;
64 maxM = 6; 64 } else if (cv < 0x40) {
65 if (clk > 200000) 65 if (clk > 150000)
66 maxM = 4; 66 maxM = 6;
67 if (clk > 340000) 67 if (clk > 200000)
68 maxM = 2; 68 maxM = 4;
69 if (clk > 340000)
70 maxM = 2;
71 }
69 } 72 }
70 73
71 P = 1 << maxP; 74 P = 1 << maxP;
@@ -227,10 +230,12 @@ nv04_pll_calc(struct nouveau_subdev *subdev, struct nvbios_pll *info, u32 freq,
227{ 230{
228 int ret; 231 int ret;
229 232
230 if (!info->vco2.max_freq) { 233 if (!info->vco2.max_freq || !N2) {
231 ret = getMNP_single(subdev, info, freq, N1, M1, P); 234 ret = getMNP_single(subdev, info, freq, N1, M1, P);
232 *N2 = 1; 235 if (N2) {
233 *M2 = 1; 236 *N2 = 1;
237 *M2 = 1;
238 }
234 } else { 239 } else {
235 ret = getMNP_double(subdev, info, freq, N1, M1, N2, M2, P); 240 ret = getMNP_double(subdev, info, freq, N1, M1, N2, M2, P);
236 } 241 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
index 2fe1f712eefa..8eca457c2814 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
@@ -45,6 +45,7 @@ nva3_pll_calc(struct nouveau_subdev *subdev, struct nvbios_pll *info,
45 lM = max(lM, (int)info->vco1.min_m); 45 lM = max(lM, (int)info->vco1.min_m);
46 hM = (info->refclk + info->vco1.min_inputfreq) / info->vco1.min_inputfreq; 46 hM = (info->refclk + info->vco1.min_inputfreq) / info->vco1.min_inputfreq;
47 hM = min(hM, (int)info->vco1.max_m); 47 hM = min(hM, (int)info->vco1.max_m);
48 lM = min(lM, hM);
48 49
49 for (M = lM; M <= hM; M++) { 50 for (M = lM; M <= hM; M++) {
50 u32 tmp = freq * *P * M; 51 u32 tmp = freq * *P * M;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h b/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h
new file mode 100644
index 000000000000..fb33f06ebd59
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h
@@ -0,0 +1,17 @@
1#ifndef __NVKM_CLK_SEQ_H__
2#define __NVKM_CLK_SEQ_H__
3
4#include <subdev/bus.h>
5#include <subdev/bus/hwsq.h>
6
7#define clk_init(s,p) hwsq_init(&(s)->base, (p))
8#define clk_exec(s,e) hwsq_exec(&(s)->base, (e))
9#define clk_have(s,r) ((s)->r_##r.addr != 0x000000)
10#define clk_rd32(s,r) hwsq_rd32(&(s)->base, &(s)->r_##r)
11#define clk_wr32(s,r,d) hwsq_wr32(&(s)->base, &(s)->r_##r, (d))
12#define clk_mask(s,r,m,d) hwsq_mask(&(s)->base, &(s)->r_##r, (m), (d))
13#define clk_setf(s,f,d) hwsq_setf(&(s)->base, (f), (d))
14#define clk_wait(s,f,d) hwsq_wait(&(s)->base, (f), (d))
15#define clk_nsec(s,n) hwsq_nsec(&(s)->base, (n))
16
17#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
index b22357d9b821..27c8235f1a85 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -168,7 +168,8 @@ setPLL_single(struct nouveau_devinit *devinit, u32 reg,
168 /* downclock -- write new NM first */ 168 /* downclock -- write new NM first */
169 nv_wr32(devinit, reg, (oldpll & 0xffff0000) | pv->NM1); 169 nv_wr32(devinit, reg, (oldpll & 0xffff0000) | pv->NM1);
170 170
171 if (chip_version < 0x17 && chip_version != 0x11) 171 if ((chip_version < 0x17 || chip_version == 0x1a) &&
172 chip_version != 0x11)
172 /* wait a bit on older chips */ 173 /* wait a bit on older chips */
173 msleep(64); 174 msleep(64);
174 nv_rd32(devinit, reg); 175 nv_rd32(devinit, reg);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
index 463b08fa0968..8d274dba1ef1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -38,12 +38,18 @@ static void
38nv10_devinit_meminit(struct nouveau_devinit *devinit) 38nv10_devinit_meminit(struct nouveau_devinit *devinit)
39{ 39{
40 struct nv10_devinit_priv *priv = (void *)devinit; 40 struct nv10_devinit_priv *priv = (void *)devinit;
41 const int mem_width[] = { 0x10, 0x00, 0x20 }; 41 static const int mem_width[] = { 0x10, 0x00, 0x20 };
42 const int mem_width_count = nv_device(priv)->chipset >= 0x17 ? 3 : 2; 42 int mem_width_count;
43 uint32_t patt = 0xdeadbeef; 43 uint32_t patt = 0xdeadbeef;
44 struct io_mapping *fb; 44 struct io_mapping *fb;
45 int i, j, k; 45 int i, j, k;
46 46
47 if (nv_device(priv)->card_type >= NV_11 &&
48 nv_device(priv)->chipset >= 0x17)
49 mem_width_count = 3;
50 else
51 mem_width_count = 2;
52
47 /* Map the framebuffer aperture */ 53 /* Map the framebuffer aperture */
48 fb = fbmem_init(nv_device(priv)->pdev); 54 fb = fbmem_init(nv_device(priv)->pdev);
49 if (!fb) { 55 if (!fb) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index 821cd75b86a3..f009d8a39d9d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -22,9 +22,10 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "subdev/fb.h" 25#include <subdev/bios.h>
26#include "subdev/bios.h" 26#include <subdev/bios/bit.h>
27#include "subdev/bios/bit.h" 27
28#include "priv.h"
28 29
29int 30int
30nouveau_fb_bios_memtype(struct nouveau_bios *bios) 31nouveau_fb_bios_memtype(struct nouveau_bios *bios)
@@ -106,9 +107,9 @@ _nouveau_fb_dtor(struct nouveau_object *object)
106 107
107int 108int
108nouveau_fb_create_(struct nouveau_object *parent, struct nouveau_object *engine, 109nouveau_fb_create_(struct nouveau_object *parent, struct nouveau_object *engine,
109 struct nouveau_oclass *oclass, struct nouveau_oclass *ramcls, 110 struct nouveau_oclass *oclass, int length, void **pobject)
110 int length, void **pobject)
111{ 111{
112 struct nouveau_fb_impl *impl = (void *)oclass;
112 static const char *name[] = { 113 static const char *name[] = {
113 [NV_MEM_TYPE_UNKNOWN] = "unknown", 114 [NV_MEM_TYPE_UNKNOWN] = "unknown",
114 [NV_MEM_TYPE_STOLEN ] = "stolen system memory", 115 [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
@@ -132,8 +133,10 @@ nouveau_fb_create_(struct nouveau_object *parent, struct nouveau_object *engine,
132 if (ret) 133 if (ret)
133 return ret; 134 return ret;
134 135
136 pfb->memtype_valid = impl->memtype;
137
135 ret = nouveau_object_ctor(nv_object(pfb), nv_object(pfb), 138 ret = nouveau_object_ctor(nv_object(pfb), nv_object(pfb),
136 ramcls, NULL, 0, &ram); 139 impl->ram, NULL, 0, &ram);
137 if (ret) { 140 if (ret) {
138 nv_fatal(pfb, "error detecting memory configuration!!\n"); 141 nv_fatal(pfb, "error detecting memory configuration!!\n");
139 return ret; 142 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
new file mode 100644
index 000000000000..34f9605ffee6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <subdev/bios.h>
26#include "priv.h"
27
28int
29nouveau_gddr5_calc(struct nouveau_ram *ram)
30{
31 struct nouveau_bios *bios = nouveau_bios(ram);
32 int pd, lf, xd, vh, vr, vo;
33 int WL, CL, WR, at, dt, ds;
34 int rq = ram->freq < 1000000; /* XXX */
35
36 switch (!!ram->ramcfg.data * ram->ramcfg.version) {
37 case 0x11:
38 pd = (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x80) >> 7;
39 lf = (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x40) >> 6;
40 xd = !(nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x20);
41 vh = (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x10) >> 4;
42 vr = (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x04) >> 2;
43 vo = nv_ro08(bios, ram->ramcfg.data + 0x06) & 0xff;
44 break;
45 default:
46 return -ENOSYS;
47 }
48
49 switch (!!ram->timing.data * ram->timing.version) {
50 case 0x20:
51 WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7;
52 CL = nv_ro08(bios, ram->timing.data + 0x04) & 0x1f;
53 WR = nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f;
54 at = (nv_ro08(bios, ram->timing.data + 0x2e) & 0xc0) >> 6;
55 dt = nv_ro08(bios, ram->timing.data + 0x2e) & 0x03;
56 ds = nv_ro08(bios, ram->timing.data + 0x2f) & 0x03;
57 break;
58 default:
59 return -ENOSYS;
60 }
61
62 if (WL < 1 || WL > 7 || CL < 5 || CL > 36 || WR < 4 || WR > 35)
63 return -EINVAL;
64 CL -= 5;
65 WR -= 4;
66
67 ram->mr[0] &= ~0xf7f;
68 ram->mr[0] |= (WR & 0x0f) << 8;
69 ram->mr[0] |= (CL & 0x0f) << 3;
70 ram->mr[0] |= (WL & 0x07) << 0;
71
72 ram->mr[1] &= ~0x0bf;
73 ram->mr[1] |= (xd & 0x01) << 7;
74 ram->mr[1] |= (at & 0x03) << 4;
75 ram->mr[1] |= (dt & 0x03) << 2;
76 ram->mr[1] |= (ds & 0x03) << 0;
77
78 ram->mr[3] &= ~0x020;
79 ram->mr[3] |= (rq & 0x01) << 5;
80
81 if (!vo)
82 vo = (ram->mr[6] & 0xff0) >> 4;
83 if (ram->mr[6] & 0x001)
84 pd = 1; /* binary driver does this.. bug? */
85 ram->mr[6] &= ~0xff1;
86 ram->mr[6] |= (vo & 0xff) << 4;
87 ram->mr[6] |= (pd & 0x01) << 0;
88
89 if (!(ram->mr[7] & 0x100))
90 vr = 0; /* binary driver does this.. bug? */
91 ram->mr[7] &= ~0x188;
92 ram->mr[7] |= (vr & 0x01) << 8;
93 ram->mr[7] |= (vh & 0x01) << 7;
94 ram->mr[7] |= (lf & 0x01) << 3;
95 return 0;
96}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
index 1f103c7b89fa..8309fe33fe84 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -22,14 +22,10 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "priv.h" 25#include "nv04.h"
26 26
27#define NV04_PFB_CFG0 0x00100200 27#define NV04_PFB_CFG0 0x00100200
28 28
29struct nv04_fb_priv {
30 struct nouveau_fb base;
31};
32
33bool 29bool
34nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags) 30nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
35{ 31{
@@ -57,30 +53,37 @@ nv04_fb_init(struct nouveau_object *object)
57 return 0; 53 return 0;
58} 54}
59 55
60static int 56int
61nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 57nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
62 struct nouveau_oclass *oclass, void *data, u32 size, 58 struct nouveau_oclass *oclass, void *data, u32 size,
63 struct nouveau_object **pobject) 59 struct nouveau_object **pobject)
64{ 60{
61 struct nv04_fb_impl *impl = (void *)oclass;
65 struct nv04_fb_priv *priv; 62 struct nv04_fb_priv *priv;
66 int ret; 63 int ret;
67 64
68 ret = nouveau_fb_create(parent, engine, oclass, &nv04_ram_oclass, &priv); 65 ret = nouveau_fb_create(parent, engine, oclass, &priv);
69 *pobject = nv_object(priv); 66 *pobject = nv_object(priv);
70 if (ret) 67 if (ret)
71 return ret; 68 return ret;
72 69
73 priv->base.memtype_valid = nv04_fb_memtype_valid; 70 priv->base.tile.regions = impl->tile.regions;
71 priv->base.tile.init = impl->tile.init;
72 priv->base.tile.comp = impl->tile.comp;
73 priv->base.tile.fini = impl->tile.fini;
74 priv->base.tile.prog = impl->tile.prog;
74 return 0; 75 return 0;
75} 76}
76 77
77struct nouveau_oclass 78struct nouveau_oclass *
78nv04_fb_oclass = { 79nv04_fb_oclass = &(struct nv04_fb_impl) {
79 .handle = NV_SUBDEV(FB, 0x04), 80 .base.base.handle = NV_SUBDEV(FB, 0x04),
80 .ofuncs = &(struct nouveau_ofuncs) { 81 .base.base.ofuncs = &(struct nouveau_ofuncs) {
81 .ctor = nv04_fb_ctor, 82 .ctor = nv04_fb_ctor,
82 .dtor = _nouveau_fb_dtor, 83 .dtor = _nouveau_fb_dtor,
83 .init = nv04_fb_init, 84 .init = nv04_fb_init,
84 .fini = _nouveau_fb_fini, 85 .fini = _nouveau_fb_fini,
85 }, 86 },
86}; 87 .base.memtype = nv04_fb_memtype_valid,
88 .base.ram = &nv04_ram_oclass,
89}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h
new file mode 100644
index 000000000000..06ce71f87a74
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h
@@ -0,0 +1,55 @@
1#ifndef __NVKM_FB_NV04_H__
2#define __NVKM_FB_NV04_H__
3
4#include "priv.h"
5
6struct nv04_fb_priv {
7 struct nouveau_fb base;
8};
9
10int nv04_fb_ctor(struct nouveau_object *, struct nouveau_object *,
11 struct nouveau_oclass *, void *, u32,
12 struct nouveau_object **);
13
14struct nv04_fb_impl {
15 struct nouveau_fb_impl base;
16 struct {
17 int regions;
18 void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
19 u32 pitch, u32 flags, struct nouveau_fb_tile *);
20 void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
21 struct nouveau_fb_tile *);
22 void (*fini)(struct nouveau_fb *, int i,
23 struct nouveau_fb_tile *);
24 void (*prog)(struct nouveau_fb *, int i,
25 struct nouveau_fb_tile *);
26 } tile;
27};
28
29void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
30 u32 pitch, u32 flags, struct nouveau_fb_tile *);
31void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
32void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
33
34void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
35 u32 pitch, u32 flags, struct nouveau_fb_tile *);
36void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
37void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
38
39int nv30_fb_init(struct nouveau_object *);
40void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
41 u32 pitch, u32 flags, struct nouveau_fb_tile *);
42
43void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
44 struct nouveau_fb_tile *);
45
46int nv41_fb_init(struct nouveau_object *);
47void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
48
49int nv44_fb_init(struct nouveau_object *);
50void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
51
52void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
53 u32 pitch, u32 flags, struct nouveau_fb_tile *);
54
55#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
index be069b5306b6..ffb7ec6d97aa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -24,11 +24,7 @@
24 * 24 *
25 */ 25 */
26 26
27#include "priv.h" 27#include "nv04.h"
28
29struct nv10_fb_priv {
30 struct nouveau_fb base;
31};
32 28
33void 29void
34nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, 30nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -57,34 +53,19 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
57 nv_rd32(pfb, 0x100240 + (i * 0x10)); 53 nv_rd32(pfb, 0x100240 + (i * 0x10));
58} 54}
59 55
60static int 56struct nouveau_oclass *
61nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 57nv10_fb_oclass = &(struct nv04_fb_impl) {
62 struct nouveau_oclass *oclass, void *data, u32 size, 58 .base.base.handle = NV_SUBDEV(FB, 0x10),
63 struct nouveau_object **pobject) 59 .base.base.ofuncs = &(struct nouveau_ofuncs) {
64{ 60 .ctor = nv04_fb_ctor,
65 struct nv10_fb_priv *priv;
66 int ret;
67
68 ret = nouveau_fb_create(parent, engine, oclass, &nv10_ram_oclass, &priv);
69 *pobject = nv_object(priv);
70 if (ret)
71 return ret;
72
73 priv->base.memtype_valid = nv04_fb_memtype_valid;
74 priv->base.tile.regions = 8;
75 priv->base.tile.init = nv10_fb_tile_init;
76 priv->base.tile.fini = nv10_fb_tile_fini;
77 priv->base.tile.prog = nv10_fb_tile_prog;
78 return 0;
79}
80
81struct nouveau_oclass
82nv10_fb_oclass = {
83 .handle = NV_SUBDEV(FB, 0x10),
84 .ofuncs = &(struct nouveau_ofuncs) {
85 .ctor = nv10_fb_ctor,
86 .dtor = _nouveau_fb_dtor, 61 .dtor = _nouveau_fb_dtor,
87 .init = _nouveau_fb_init, 62 .init = _nouveau_fb_init,
88 .fini = _nouveau_fb_fini, 63 .fini = _nouveau_fb_fini,
89 }, 64 },
90}; 65 .base.memtype = nv04_fb_memtype_valid,
66 .base.ram = &nv10_ram_oclass,
67 .tile.regions = 8,
68 .tile.init = nv10_fb_tile_init,
69 .tile.fini = nv10_fb_tile_fini,
70 .tile.prog = nv10_fb_tile_prog,
71}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
index 57a2af0079b3..9159a5ccee93 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -24,40 +24,21 @@
24 * 24 *
25 */ 25 */
26 26
27#include "priv.h" 27#include "nv04.h"
28 28
29struct nv1a_fb_priv { 29struct nouveau_oclass *
30 struct nouveau_fb base; 30nv1a_fb_oclass = &(struct nv04_fb_impl) {
31}; 31 .base.base.handle = NV_SUBDEV(FB, 0x1a),
32 32 .base.base.ofuncs = &(struct nouveau_ofuncs) {
33static int 33 .ctor = nv04_fb_ctor,
34nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nv1a_fb_priv *priv;
39 int ret;
40
41 ret = nouveau_fb_create(parent, engine, oclass, &nv1a_ram_oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.memtype_valid = nv04_fb_memtype_valid;
47 priv->base.tile.regions = 8;
48 priv->base.tile.init = nv10_fb_tile_init;
49 priv->base.tile.fini = nv10_fb_tile_fini;
50 priv->base.tile.prog = nv10_fb_tile_prog;
51 return 0;
52}
53
54struct nouveau_oclass
55nv1a_fb_oclass = {
56 .handle = NV_SUBDEV(FB, 0x1a),
57 .ofuncs = &(struct nouveau_ofuncs) {
58 .ctor = nv1a_fb_ctor,
59 .dtor = _nouveau_fb_dtor, 34 .dtor = _nouveau_fb_dtor,
60 .init = _nouveau_fb_init, 35 .init = _nouveau_fb_init,
61 .fini = _nouveau_fb_fini, 36 .fini = _nouveau_fb_fini,
62 }, 37 },
63}; 38 .base.memtype = nv04_fb_memtype_valid,
39 .base.ram = &nv10_ram_oclass,
40 .tile.regions = 8,
41 .tile.init = nv10_fb_tile_init,
42 .tile.fini = nv10_fb_tile_fini,
43 .tile.prog = nv10_fb_tile_prog,
44}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
index b18c4e63bb47..f003c1b1893f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -24,11 +24,7 @@
24 * 24 *
25 */ 25 */
26 26
27#include "priv.h" 27#include "nv04.h"
28
29struct nv20_fb_priv {
30 struct nouveau_fb base;
31};
32 28
33void 29void
34nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, 30nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -80,35 +76,20 @@ nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
80 nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp); 76 nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
81} 77}
82 78
83static int 79struct nouveau_oclass *
84nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 80nv20_fb_oclass = &(struct nv04_fb_impl) {
85 struct nouveau_oclass *oclass, void *data, u32 size, 81 .base.base.handle = NV_SUBDEV(FB, 0x20),
86 struct nouveau_object **pobject) 82 .base.base.ofuncs = &(struct nouveau_ofuncs) {
87{ 83 .ctor = nv04_fb_ctor,
88 struct nv20_fb_priv *priv;
89 int ret;
90
91 ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
92 *pobject = nv_object(priv);
93 if (ret)
94 return ret;
95
96 priv->base.memtype_valid = nv04_fb_memtype_valid;
97 priv->base.tile.regions = 8;
98 priv->base.tile.init = nv20_fb_tile_init;
99 priv->base.tile.comp = nv20_fb_tile_comp;
100 priv->base.tile.fini = nv20_fb_tile_fini;
101 priv->base.tile.prog = nv20_fb_tile_prog;
102 return 0;
103}
104
105struct nouveau_oclass
106nv20_fb_oclass = {
107 .handle = NV_SUBDEV(FB, 0x20),
108 .ofuncs = &(struct nouveau_ofuncs) {
109 .ctor = nv20_fb_ctor,
110 .dtor = _nouveau_fb_dtor, 84 .dtor = _nouveau_fb_dtor,
111 .init = _nouveau_fb_init, 85 .init = _nouveau_fb_init,
112 .fini = _nouveau_fb_fini, 86 .fini = _nouveau_fb_fini,
113 }, 87 },
114}; 88 .base.memtype = nv04_fb_memtype_valid,
89 .base.ram = &nv20_ram_oclass,
90 .tile.regions = 8,
91 .tile.init = nv20_fb_tile_init,
92 .tile.comp = nv20_fb_tile_comp,
93 .tile.fini = nv20_fb_tile_fini,
94 .tile.prog = nv20_fb_tile_prog,
95}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
index 32ccabf10c45..f34f4223210b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
@@ -24,11 +24,7 @@
24 * 24 *
25 */ 25 */
26 26
27#include "priv.h" 27#include "nv04.h"
28
29struct nv25_fb_priv {
30 struct nouveau_fb base;
31};
32 28
33static void 29static void
34nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, 30nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -46,35 +42,20 @@ nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
46 } 42 }
47} 43}
48 44
49static int 45struct nouveau_oclass *
50nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 46nv25_fb_oclass = &(struct nv04_fb_impl) {
51 struct nouveau_oclass *oclass, void *data, u32 size, 47 .base.base.handle = NV_SUBDEV(FB, 0x25),
52 struct nouveau_object **pobject) 48 .base.base.ofuncs = &(struct nouveau_ofuncs) {
53{ 49 .ctor = nv04_fb_ctor,
54 struct nv25_fb_priv *priv;
55 int ret;
56
57 ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
58 *pobject = nv_object(priv);
59 if (ret)
60 return ret;
61
62 priv->base.memtype_valid = nv04_fb_memtype_valid;
63 priv->base.tile.regions = 8;
64 priv->base.tile.init = nv20_fb_tile_init;
65 priv->base.tile.comp = nv25_fb_tile_comp;
66 priv->base.tile.fini = nv20_fb_tile_fini;
67 priv->base.tile.prog = nv20_fb_tile_prog;
68 return 0;
69}
70
71struct nouveau_oclass
72nv25_fb_oclass = {
73 .handle = NV_SUBDEV(FB, 0x25),
74 .ofuncs = &(struct nouveau_ofuncs) {
75 .ctor = nv25_fb_ctor,
76 .dtor = _nouveau_fb_dtor, 50 .dtor = _nouveau_fb_dtor,
77 .init = _nouveau_fb_init, 51 .init = _nouveau_fb_init,
78 .fini = _nouveau_fb_fini, 52 .fini = _nouveau_fb_fini,
79 }, 53 },
80}; 54 .base.memtype = nv04_fb_memtype_valid,
55 .base.ram = &nv20_ram_oclass,
56 .tile.regions = 8,
57 .tile.init = nv20_fb_tile_init,
58 .tile.comp = nv25_fb_tile_comp,
59 .tile.fini = nv20_fb_tile_fini,
60 .tile.prog = nv20_fb_tile_prog,
61}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
index bef756d43d33..69093f7151f0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -24,11 +24,7 @@
24 * 24 *
25 */ 25 */
26 26
27#include "priv.h" 27#include "nv04.h"
28
29struct nv30_fb_priv {
30 struct nouveau_fb base;
31};
32 28
33void 29void
34nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, 30nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -67,7 +63,7 @@ nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
67} 63}
68 64
69static int 65static int
70calc_bias(struct nv30_fb_priv *priv, int k, int i, int j) 66calc_bias(struct nv04_fb_priv *priv, int k, int i, int j)
71{ 67{
72 struct nouveau_device *device = nv_device(priv); 68 struct nouveau_device *device = nv_device(priv);
73 int b = (device->chipset > 0x30 ? 69 int b = (device->chipset > 0x30 ?
@@ -78,7 +74,7 @@ calc_bias(struct nv30_fb_priv *priv, int k, int i, int j)
78} 74}
79 75
80static int 76static int
81calc_ref(struct nv30_fb_priv *priv, int l, int k, int i) 77calc_ref(struct nv04_fb_priv *priv, int l, int k, int i)
82{ 78{
83 int j, x = 0; 79 int j, x = 0;
84 80
@@ -95,7 +91,7 @@ int
95nv30_fb_init(struct nouveau_object *object) 91nv30_fb_init(struct nouveau_object *object)
96{ 92{
97 struct nouveau_device *device = nv_device(object); 93 struct nouveau_device *device = nv_device(object);
98 struct nv30_fb_priv *priv = (void *)object; 94 struct nv04_fb_priv *priv = (void *)object;
99 int ret, i, j; 95 int ret, i, j;
100 96
101 ret = nouveau_fb_init(&priv->base); 97 ret = nouveau_fb_init(&priv->base);
@@ -124,35 +120,20 @@ nv30_fb_init(struct nouveau_object *object)
124 return 0; 120 return 0;
125} 121}
126 122
127static int 123struct nouveau_oclass *
128nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 124nv30_fb_oclass = &(struct nv04_fb_impl) {
129 struct nouveau_oclass *oclass, void *data, u32 size, 125 .base.base.handle = NV_SUBDEV(FB, 0x30),
130 struct nouveau_object **pobject) 126 .base.base.ofuncs = &(struct nouveau_ofuncs) {
131{ 127 .ctor = nv04_fb_ctor,
132 struct nv30_fb_priv *priv;
133 int ret;
134
135 ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
136 *pobject = nv_object(priv);
137 if (ret)
138 return ret;
139
140 priv->base.memtype_valid = nv04_fb_memtype_valid;
141 priv->base.tile.regions = 8;
142 priv->base.tile.init = nv30_fb_tile_init;
143 priv->base.tile.comp = nv30_fb_tile_comp;
144 priv->base.tile.fini = nv20_fb_tile_fini;
145 priv->base.tile.prog = nv20_fb_tile_prog;
146 return 0;
147}
148
149struct nouveau_oclass
150nv30_fb_oclass = {
151 .handle = NV_SUBDEV(FB, 0x30),
152 .ofuncs = &(struct nouveau_ofuncs) {
153 .ctor = nv30_fb_ctor,
154 .dtor = _nouveau_fb_dtor, 128 .dtor = _nouveau_fb_dtor,
155 .init = nv30_fb_init, 129 .init = nv30_fb_init,
156 .fini = _nouveau_fb_fini, 130 .fini = _nouveau_fb_fini,
157 }, 131 },
158}; 132 .base.memtype = nv04_fb_memtype_valid,
133 .base.ram = &nv20_ram_oclass,
134 .tile.regions = 8,
135 .tile.init = nv30_fb_tile_init,
136 .tile.comp = nv30_fb_tile_comp,
137 .tile.fini = nv20_fb_tile_fini,
138 .tile.prog = nv20_fb_tile_prog,
139}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
index 097d8e3824f2..161b06e8fc3f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
@@ -24,11 +24,7 @@
24 * 24 *
25 */ 25 */
26 26
27#include "priv.h" 27#include "nv04.h"
28
29struct nv35_fb_priv {
30 struct nouveau_fb base;
31};
32 28
33static void 29static void
34nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, 30nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -47,35 +43,20 @@ nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
47 } 43 }
48} 44}
49 45
50static int 46struct nouveau_oclass *
51nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 47nv35_fb_oclass = &(struct nv04_fb_impl) {
52 struct nouveau_oclass *oclass, void *data, u32 size, 48 .base.base.handle = NV_SUBDEV(FB, 0x35),
53 struct nouveau_object **pobject) 49 .base.base.ofuncs = &(struct nouveau_ofuncs) {
54{ 50 .ctor = nv04_fb_ctor,
55 struct nv35_fb_priv *priv;
56 int ret;
57
58 ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
59 *pobject = nv_object(priv);
60 if (ret)
61 return ret;
62
63 priv->base.memtype_valid = nv04_fb_memtype_valid;
64 priv->base.tile.regions = 8;
65 priv->base.tile.init = nv30_fb_tile_init;
66 priv->base.tile.comp = nv35_fb_tile_comp;
67 priv->base.tile.fini = nv20_fb_tile_fini;
68 priv->base.tile.prog = nv20_fb_tile_prog;
69 return 0;
70}
71
72struct nouveau_oclass
73nv35_fb_oclass = {
74 .handle = NV_SUBDEV(FB, 0x35),
75 .ofuncs = &(struct nouveau_ofuncs) {
76 .ctor = nv35_fb_ctor,
77 .dtor = _nouveau_fb_dtor, 51 .dtor = _nouveau_fb_dtor,
78 .init = nv30_fb_init, 52 .init = nv30_fb_init,
79 .fini = _nouveau_fb_fini, 53 .fini = _nouveau_fb_fini,
80 }, 54 },
81}; 55 .base.memtype = nv04_fb_memtype_valid,
56 .base.ram = &nv20_ram_oclass,
57 .tile.regions = 8,
58 .tile.init = nv30_fb_tile_init,
59 .tile.comp = nv35_fb_tile_comp,
60 .tile.fini = nv20_fb_tile_fini,
61 .tile.prog = nv20_fb_tile_prog,
62}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
index 9d6d9df896d9..2dd3d0aab6bb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
@@ -24,11 +24,7 @@
24 * 24 *
25 */ 25 */
26 26
27#include "priv.h" 27#include "nv04.h"
28
29struct nv36_fb_priv {
30 struct nouveau_fb base;
31};
32 28
33static void 29static void
34nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, 30nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -47,35 +43,20 @@ nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
47 } 43 }
48} 44}
49 45
50static int 46struct nouveau_oclass *
51nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 47nv36_fb_oclass = &(struct nv04_fb_impl) {
52 struct nouveau_oclass *oclass, void *data, u32 size, 48 .base.base.handle = NV_SUBDEV(FB, 0x36),
53 struct nouveau_object **pobject) 49 .base.base.ofuncs = &(struct nouveau_ofuncs) {
54{ 50 .ctor = nv04_fb_ctor,
55 struct nv36_fb_priv *priv;
56 int ret;
57
58 ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
59 *pobject = nv_object(priv);
60 if (ret)
61 return ret;
62
63 priv->base.memtype_valid = nv04_fb_memtype_valid;
64 priv->base.tile.regions = 8;
65 priv->base.tile.init = nv30_fb_tile_init;
66 priv->base.tile.comp = nv36_fb_tile_comp;
67 priv->base.tile.fini = nv20_fb_tile_fini;
68 priv->base.tile.prog = nv20_fb_tile_prog;
69 return 0;
70}
71
72struct nouveau_oclass
73nv36_fb_oclass = {
74 .handle = NV_SUBDEV(FB, 0x36),
75 .ofuncs = &(struct nouveau_ofuncs) {
76 .ctor = nv36_fb_ctor,
77 .dtor = _nouveau_fb_dtor, 51 .dtor = _nouveau_fb_dtor,
78 .init = nv30_fb_init, 52 .init = nv30_fb_init,
79 .fini = _nouveau_fb_fini, 53 .fini = _nouveau_fb_fini,
80 }, 54 },
81}; 55 .base.memtype = nv04_fb_memtype_valid,
56 .base.ram = &nv20_ram_oclass,
57 .tile.regions = 8,
58 .tile.init = nv30_fb_tile_init,
59 .tile.comp = nv36_fb_tile_comp,
60 .tile.fini = nv20_fb_tile_fini,
61 .tile.prog = nv20_fb_tile_prog,
62}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
index 33b4393a7829..95a115ab0c86 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -24,11 +24,7 @@
24 * 24 *
25 */ 25 */
26 26
27#include "priv.h" 27#include "nv04.h"
28
29struct nv40_fb_priv {
30 struct nouveau_fb base;
31};
32 28
33void 29void
34nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, 30nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -50,7 +46,7 @@ nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
50static int 46static int
51nv40_fb_init(struct nouveau_object *object) 47nv40_fb_init(struct nouveau_object *object)
52{ 48{
53 struct nv40_fb_priv *priv = (void *)object; 49 struct nv04_fb_priv *priv = (void *)object;
54 int ret; 50 int ret;
55 51
56 ret = nouveau_fb_init(&priv->base); 52 ret = nouveau_fb_init(&priv->base);
@@ -61,36 +57,20 @@ nv40_fb_init(struct nouveau_object *object)
61 return 0; 57 return 0;
62} 58}
63 59
64static int 60struct nouveau_oclass *
65nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 61nv40_fb_oclass = &(struct nv04_fb_impl) {
66 struct nouveau_oclass *oclass, void *data, u32 size, 62 .base.base.handle = NV_SUBDEV(FB, 0x40),
67 struct nouveau_object **pobject) 63 .base.base.ofuncs = &(struct nouveau_ofuncs) {
68{ 64 .ctor = nv04_fb_ctor,
69 struct nv40_fb_priv *priv;
70 int ret;
71
72 ret = nouveau_fb_create(parent, engine, oclass, &nv40_ram_oclass, &priv);
73 *pobject = nv_object(priv);
74 if (ret)
75 return ret;
76
77 priv->base.memtype_valid = nv04_fb_memtype_valid;
78 priv->base.tile.regions = 8;
79 priv->base.tile.init = nv30_fb_tile_init;
80 priv->base.tile.comp = nv40_fb_tile_comp;
81 priv->base.tile.fini = nv20_fb_tile_fini;
82 priv->base.tile.prog = nv20_fb_tile_prog;
83 return 0;
84}
85
86
87struct nouveau_oclass
88nv40_fb_oclass = {
89 .handle = NV_SUBDEV(FB, 0x40),
90 .ofuncs = &(struct nouveau_ofuncs) {
91 .ctor = nv40_fb_ctor,
92 .dtor = _nouveau_fb_dtor, 65 .dtor = _nouveau_fb_dtor,
93 .init = nv40_fb_init, 66 .init = nv40_fb_init,
94 .fini = _nouveau_fb_fini, 67 .fini = _nouveau_fb_fini,
95 }, 68 },
96}; 69 .base.memtype = nv04_fb_memtype_valid,
70 .base.ram = &nv40_ram_oclass,
71 .tile.regions = 8,
72 .tile.init = nv30_fb_tile_init,
73 .tile.comp = nv40_fb_tile_comp,
74 .tile.fini = nv20_fb_tile_fini,
75 .tile.prog = nv20_fb_tile_prog,
76}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h
new file mode 100644
index 000000000000..581f808527f2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h
@@ -0,0 +1,17 @@
1#ifndef __NVKM_FB_NV40_H__
2#define __NVKM_FB_NV40_H__
3
4#include "priv.h"
5
6struct nv40_ram {
7 struct nouveau_ram base;
8 u32 ctrl;
9 u32 coef;
10};
11
12
13int nv40_ram_calc(struct nouveau_fb *, u32);
14int nv40_ram_prog(struct nouveau_fb *);
15void nv40_ram_tidy(struct nouveau_fb *);
16
17#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
index 02cd83789cd4..b239a8615599 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
@@ -24,11 +24,7 @@
24 * 24 *
25 */ 25 */
26 26
27#include "priv.h" 27#include "nv04.h"
28
29struct nv41_fb_priv {
30 struct nouveau_fb base;
31};
32 28
33void 29void
34nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) 30nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
@@ -43,7 +39,7 @@ nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
43int 39int
44nv41_fb_init(struct nouveau_object *object) 40nv41_fb_init(struct nouveau_object *object)
45{ 41{
46 struct nv41_fb_priv *priv = (void *)object; 42 struct nv04_fb_priv *priv = (void *)object;
47 int ret; 43 int ret;
48 44
49 ret = nouveau_fb_init(&priv->base); 45 ret = nouveau_fb_init(&priv->base);
@@ -54,36 +50,20 @@ nv41_fb_init(struct nouveau_object *object)
54 return 0; 50 return 0;
55} 51}
56 52
57static int 53struct nouveau_oclass *
58nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 54nv41_fb_oclass = &(struct nv04_fb_impl) {
59 struct nouveau_oclass *oclass, void *data, u32 size, 55 .base.base.handle = NV_SUBDEV(FB, 0x41),
60 struct nouveau_object **pobject) 56 .base.base.ofuncs = &(struct nouveau_ofuncs) {
61{ 57 .ctor = nv04_fb_ctor,
62 struct nv41_fb_priv *priv;
63 int ret;
64
65 ret = nouveau_fb_create(parent, engine, oclass, &nv41_ram_oclass, &priv);
66 *pobject = nv_object(priv);
67 if (ret)
68 return ret;
69
70 priv->base.memtype_valid = nv04_fb_memtype_valid;
71 priv->base.tile.regions = 12;
72 priv->base.tile.init = nv30_fb_tile_init;
73 priv->base.tile.comp = nv40_fb_tile_comp;
74 priv->base.tile.fini = nv20_fb_tile_fini;
75 priv->base.tile.prog = nv41_fb_tile_prog;
76 return 0;
77}
78
79
80struct nouveau_oclass
81nv41_fb_oclass = {
82 .handle = NV_SUBDEV(FB, 0x41),
83 .ofuncs = &(struct nouveau_ofuncs) {
84 .ctor = nv41_fb_ctor,
85 .dtor = _nouveau_fb_dtor, 58 .dtor = _nouveau_fb_dtor,
86 .init = nv41_fb_init, 59 .init = nv41_fb_init,
87 .fini = _nouveau_fb_fini, 60 .fini = _nouveau_fb_fini,
88 }, 61 },
89}; 62 .base.memtype = nv04_fb_memtype_valid,
63 .base.ram = &nv41_ram_oclass,
64 .tile.regions = 12,
65 .tile.init = nv30_fb_tile_init,
66 .tile.comp = nv40_fb_tile_comp,
67 .tile.fini = nv20_fb_tile_fini,
68 .tile.prog = nv41_fb_tile_prog,
69}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
index c5246c29f293..d8478208a681 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
@@ -24,11 +24,7 @@
24 * 24 *
25 */ 25 */
26 26
27#include "priv.h" 27#include "nv04.h"
28
29struct nv44_fb_priv {
30 struct nouveau_fb base;
31};
32 28
33static void 29static void
34nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, 30nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -52,7 +48,7 @@ nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
52int 48int
53nv44_fb_init(struct nouveau_object *object) 49nv44_fb_init(struct nouveau_object *object)
54{ 50{
55 struct nv44_fb_priv *priv = (void *)object; 51 struct nv04_fb_priv *priv = (void *)object;
56 int ret; 52 int ret;
57 53
58 ret = nouveau_fb_init(&priv->base); 54 ret = nouveau_fb_init(&priv->base);
@@ -64,35 +60,19 @@ nv44_fb_init(struct nouveau_object *object)
64 return 0; 60 return 0;
65} 61}
66 62
67static int 63struct nouveau_oclass *
68nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 64nv44_fb_oclass = &(struct nv04_fb_impl) {
69 struct nouveau_oclass *oclass, void *data, u32 size, 65 .base.base.handle = NV_SUBDEV(FB, 0x44),
70 struct nouveau_object **pobject) 66 .base.base.ofuncs = &(struct nouveau_ofuncs) {
71{ 67 .ctor = nv04_fb_ctor,
72 struct nv44_fb_priv *priv;
73 int ret;
74
75 ret = nouveau_fb_create(parent, engine, oclass, &nv44_ram_oclass, &priv);
76 *pobject = nv_object(priv);
77 if (ret)
78 return ret;
79
80 priv->base.memtype_valid = nv04_fb_memtype_valid;
81 priv->base.tile.regions = 12;
82 priv->base.tile.init = nv44_fb_tile_init;
83 priv->base.tile.fini = nv20_fb_tile_fini;
84 priv->base.tile.prog = nv44_fb_tile_prog;
85 return 0;
86}
87
88
89struct nouveau_oclass
90nv44_fb_oclass = {
91 .handle = NV_SUBDEV(FB, 0x44),
92 .ofuncs = &(struct nouveau_ofuncs) {
93 .ctor = nv44_fb_ctor,
94 .dtor = _nouveau_fb_dtor, 68 .dtor = _nouveau_fb_dtor,
95 .init = nv44_fb_init, 69 .init = nv44_fb_init,
96 .fini = _nouveau_fb_fini, 70 .fini = _nouveau_fb_fini,
97 }, 71 },
98}; 72 .base.memtype = nv04_fb_memtype_valid,
73 .base.ram = &nv44_ram_oclass,
74 .tile.regions = 12,
75 .tile.init = nv44_fb_tile_init,
76 .tile.fini = nv20_fb_tile_fini,
77 .tile.prog = nv44_fb_tile_prog,
78}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
index e2b57909bfca..a5b77514d35b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
@@ -24,11 +24,7 @@
24 * 24 *
25 */ 25 */
26 26
27#include "priv.h" 27#include "nv04.h"
28
29struct nv46_fb_priv {
30 struct nouveau_fb base;
31};
32 28
33void 29void
34nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, 30nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -44,35 +40,19 @@ nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
44 tile->pitch = pitch; 40 tile->pitch = pitch;
45} 41}
46 42
47static int 43struct nouveau_oclass *
48nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 44nv46_fb_oclass = &(struct nv04_fb_impl) {
49 struct nouveau_oclass *oclass, void *data, u32 size, 45 .base.base.handle = NV_SUBDEV(FB, 0x46),
50 struct nouveau_object **pobject) 46 .base.base.ofuncs = &(struct nouveau_ofuncs) {
51{ 47 .ctor = nv04_fb_ctor,
52 struct nv46_fb_priv *priv;
53 int ret;
54
55 ret = nouveau_fb_create(parent, engine, oclass, &nv44_ram_oclass, &priv);
56 *pobject = nv_object(priv);
57 if (ret)
58 return ret;
59
60 priv->base.memtype_valid = nv04_fb_memtype_valid;
61 priv->base.tile.regions = 15;
62 priv->base.tile.init = nv46_fb_tile_init;
63 priv->base.tile.fini = nv20_fb_tile_fini;
64 priv->base.tile.prog = nv44_fb_tile_prog;
65 return 0;
66}
67
68
69struct nouveau_oclass
70nv46_fb_oclass = {
71 .handle = NV_SUBDEV(FB, 0x46),
72 .ofuncs = &(struct nouveau_ofuncs) {
73 .ctor = nv46_fb_ctor,
74 .dtor = _nouveau_fb_dtor, 48 .dtor = _nouveau_fb_dtor,
75 .init = nv44_fb_init, 49 .init = nv44_fb_init,
76 .fini = _nouveau_fb_fini, 50 .fini = _nouveau_fb_fini,
77 }, 51 },
78}; 52 .base.memtype = nv04_fb_memtype_valid,
53 .base.ram = &nv44_ram_oclass,
54 .tile.regions = 15,
55 .tile.init = nv46_fb_tile_init,
56 .tile.fini = nv20_fb_tile_fini,
57 .tile.prog = nv44_fb_tile_prog,
58}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
index fe6a2278621d..3bea142376bc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
@@ -24,42 +24,22 @@
24 * 24 *
25 */ 25 */
26 26
27#include "priv.h" 27#include "nv04.h"
28 28
29struct nv47_fb_priv { 29struct nouveau_oclass *
30 struct nouveau_fb base; 30nv47_fb_oclass = &(struct nv04_fb_impl) {
31}; 31 .base.base.handle = NV_SUBDEV(FB, 0x47),
32 32 .base.base.ofuncs = &(struct nouveau_ofuncs) {
33static int 33 .ctor = nv04_fb_ctor,
34nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nv47_fb_priv *priv;
39 int ret;
40
41 ret = nouveau_fb_create(parent, engine, oclass, &nv41_ram_oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.memtype_valid = nv04_fb_memtype_valid;
47 priv->base.tile.regions = 15;
48 priv->base.tile.init = nv30_fb_tile_init;
49 priv->base.tile.comp = nv40_fb_tile_comp;
50 priv->base.tile.fini = nv20_fb_tile_fini;
51 priv->base.tile.prog = nv41_fb_tile_prog;
52 return 0;
53}
54
55
56struct nouveau_oclass
57nv47_fb_oclass = {
58 .handle = NV_SUBDEV(FB, 0x47),
59 .ofuncs = &(struct nouveau_ofuncs) {
60 .ctor = nv47_fb_ctor,
61 .dtor = _nouveau_fb_dtor, 34 .dtor = _nouveau_fb_dtor,
62 .init = nv41_fb_init, 35 .init = nv41_fb_init,
63 .fini = _nouveau_fb_fini, 36 .fini = _nouveau_fb_fini,
64 }, 37 },
65}; 38 .base.memtype = nv04_fb_memtype_valid,
39 .base.ram = &nv41_ram_oclass,
40 .tile.regions = 15,
41 .tile.init = nv30_fb_tile_init,
42 .tile.comp = nv40_fb_tile_comp,
43 .tile.fini = nv20_fb_tile_fini,
44 .tile.prog = nv41_fb_tile_prog,
45}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
index 5eca99b8c7e2..666cbd5d47f5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
@@ -24,42 +24,22 @@
24 * 24 *
25 */ 25 */
26 26
27#include "priv.h" 27#include "nv04.h"
28 28
29struct nv49_fb_priv { 29struct nouveau_oclass *
30 struct nouveau_fb base; 30nv49_fb_oclass = &(struct nv04_fb_impl) {
31}; 31 .base.base.handle = NV_SUBDEV(FB, 0x49),
32 32 .base.base.ofuncs = &(struct nouveau_ofuncs) {
33static int 33 .ctor = nv04_fb_ctor,
34nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nv49_fb_priv *priv;
39 int ret;
40
41 ret = nouveau_fb_create(parent, engine, oclass, &nv49_ram_oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.memtype_valid = nv04_fb_memtype_valid;
47 priv->base.tile.regions = 15;
48 priv->base.tile.init = nv30_fb_tile_init;
49 priv->base.tile.comp = nv40_fb_tile_comp;
50 priv->base.tile.fini = nv20_fb_tile_fini;
51 priv->base.tile.prog = nv41_fb_tile_prog;
52 return 0;
53}
54
55
56struct nouveau_oclass
57nv49_fb_oclass = {
58 .handle = NV_SUBDEV(FB, 0x49),
59 .ofuncs = &(struct nouveau_ofuncs) {
60 .ctor = nv49_fb_ctor,
61 .dtor = _nouveau_fb_dtor, 34 .dtor = _nouveau_fb_dtor,
62 .init = nv41_fb_init, 35 .init = nv41_fb_init,
63 .fini = _nouveau_fb_fini, 36 .fini = _nouveau_fb_fini,
64 }, 37 },
65}; 38 .base.memtype = nv04_fb_memtype_valid,
39 .base.ram = &nv49_ram_oclass,
40 .tile.regions = 15,
41 .tile.init = nv30_fb_tile_init,
42 .tile.comp = nv40_fb_tile_comp,
43 .tile.fini = nv20_fb_tile_fini,
44 .tile.prog = nv41_fb_tile_prog,
45}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
index 1190b78a1e91..42e64f364ec1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
@@ -24,40 +24,21 @@
24 * 24 *
25 */ 25 */
26 26
27#include "priv.h" 27#include "nv04.h"
28 28
29struct nv4e_fb_priv { 29struct nouveau_oclass *
30 struct nouveau_fb base; 30nv4e_fb_oclass = &(struct nv04_fb_impl) {
31}; 31 .base.base.handle = NV_SUBDEV(FB, 0x4e),
32 32 .base.base.ofuncs = &(struct nouveau_ofuncs) {
33static int 33 .ctor = nv04_fb_ctor,
34nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nv4e_fb_priv *priv;
39 int ret;
40
41 ret = nouveau_fb_create(parent, engine, oclass, &nv4e_ram_oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.memtype_valid = nv04_fb_memtype_valid;
47 priv->base.tile.regions = 12;
48 priv->base.tile.init = nv46_fb_tile_init;
49 priv->base.tile.fini = nv20_fb_tile_fini;
50 priv->base.tile.prog = nv44_fb_tile_prog;
51 return 0;
52}
53
54struct nouveau_oclass
55nv4e_fb_oclass = {
56 .handle = NV_SUBDEV(FB, 0x4e),
57 .ofuncs = &(struct nouveau_ofuncs) {
58 .ctor = nv4e_fb_ctor,
59 .dtor = _nouveau_fb_dtor, 34 .dtor = _nouveau_fb_dtor,
60 .init = nv44_fb_init, 35 .init = nv44_fb_init,
61 .fini = _nouveau_fb_fini, 36 .fini = _nouveau_fb_fini,
62 }, 37 },
63}; 38 .base.memtype = nv04_fb_memtype_valid,
39 .base.ram = &nv4e_ram_oclass,
40 .tile.regions = 12,
41 .tile.init = nv46_fb_tile_init,
42 .tile.fini = nv20_fb_tile_fini,
43 .tile.prog = nv44_fb_tile_prog,
44}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index da614ec5564b..cbc7f00c1278 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -27,14 +27,9 @@
27#include <core/engctx.h> 27#include <core/engctx.h>
28#include <core/object.h> 28#include <core/object.h>
29 29
30#include "priv.h"
31#include <subdev/bios.h> 30#include <subdev/bios.h>
32 31
33struct nv50_fb_priv { 32#include "nv50.h"
34 struct nouveau_fb base;
35 struct page *r100c08_page;
36 dma_addr_t r100c08;
37};
38 33
39int 34int
40nv50_fb_memtype[0x80] = { 35nv50_fb_memtype[0x80] = {
@@ -48,7 +43,7 @@ nv50_fb_memtype[0x80] = {
48 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0 43 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
49}; 44};
50 45
51static bool 46bool
52nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype) 47nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
53{ 48{
54 return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0; 49 return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
@@ -239,7 +234,7 @@ nv50_fb_intr(struct nouveau_subdev *subdev)
239 pr_cont("0x%08x\n", st1); 234 pr_cont("0x%08x\n", st1);
240} 235}
241 236
242static int 237int
243nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 238nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
244 struct nouveau_oclass *oclass, void *data, u32 size, 239 struct nouveau_oclass *oclass, void *data, u32 size,
245 struct nouveau_object **pobject) 240 struct nouveau_object **pobject)
@@ -248,7 +243,7 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
248 struct nv50_fb_priv *priv; 243 struct nv50_fb_priv *priv;
249 int ret; 244 int ret;
250 245
251 ret = nouveau_fb_create(parent, engine, oclass, &nv50_ram_oclass, &priv); 246 ret = nouveau_fb_create(parent, engine, oclass, &priv);
252 *pobject = nv_object(priv); 247 *pobject = nv_object(priv);
253 if (ret) 248 if (ret)
254 return ret; 249 return ret;
@@ -264,12 +259,11 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
264 nv_warn(priv, "failed 0x100c08 page alloc\n"); 259 nv_warn(priv, "failed 0x100c08 page alloc\n");
265 } 260 }
266 261
267 priv->base.memtype_valid = nv50_fb_memtype_valid;
268 nv_subdev(priv)->intr = nv50_fb_intr; 262 nv_subdev(priv)->intr = nv50_fb_intr;
269 return 0; 263 return 0;
270} 264}
271 265
272static void 266void
273nv50_fb_dtor(struct nouveau_object *object) 267nv50_fb_dtor(struct nouveau_object *object)
274{ 268{
275 struct nouveau_device *device = nv_device(object); 269 struct nouveau_device *device = nv_device(object);
@@ -284,10 +278,10 @@ nv50_fb_dtor(struct nouveau_object *object)
284 nouveau_fb_destroy(&priv->base); 278 nouveau_fb_destroy(&priv->base);
285} 279}
286 280
287static int 281int
288nv50_fb_init(struct nouveau_object *object) 282nv50_fb_init(struct nouveau_object *object)
289{ 283{
290 struct nouveau_device *device = nv_device(object); 284 struct nv50_fb_impl *impl = (void *)object->oclass;
291 struct nv50_fb_priv *priv = (void *)object; 285 struct nv50_fb_priv *priv = (void *)object;
292 int ret; 286 int ret;
293 287
@@ -303,33 +297,20 @@ nv50_fb_init(struct nouveau_object *object)
303 297
304 /* This is needed to get meaningful information from 100c90 298 /* This is needed to get meaningful information from 100c90
305 * on traps. No idea what these values mean exactly. */ 299 * on traps. No idea what these values mean exactly. */
306 switch (device->chipset) { 300 nv_wr32(priv, 0x100c90, impl->trap);
307 case 0x50:
308 nv_wr32(priv, 0x100c90, 0x000707ff);
309 break;
310 case 0xa3:
311 case 0xa5:
312 case 0xa8:
313 nv_wr32(priv, 0x100c90, 0x000d0fff);
314 break;
315 case 0xaf:
316 nv_wr32(priv, 0x100c90, 0x089d1fff);
317 break;
318 default:
319 nv_wr32(priv, 0x100c90, 0x001d07ff);
320 break;
321 }
322
323 return 0; 301 return 0;
324} 302}
325 303
326struct nouveau_oclass 304struct nouveau_oclass *
327nv50_fb_oclass = { 305nv50_fb_oclass = &(struct nv50_fb_impl) {
328 .handle = NV_SUBDEV(FB, 0x50), 306 .base.base.handle = NV_SUBDEV(FB, 0x50),
329 .ofuncs = &(struct nouveau_ofuncs) { 307 .base.base.ofuncs = &(struct nouveau_ofuncs) {
330 .ctor = nv50_fb_ctor, 308 .ctor = nv50_fb_ctor,
331 .dtor = nv50_fb_dtor, 309 .dtor = nv50_fb_dtor,
332 .init = nv50_fb_init, 310 .init = nv50_fb_init,
333 .fini = _nouveau_fb_fini, 311 .fini = _nouveau_fb_fini,
334 }, 312 },
335}; 313 .base.memtype = nv50_fb_memtype_valid,
314 .base.ram = &nv50_ram_oclass,
315 .trap = 0x000707ff,
316}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h
new file mode 100644
index 000000000000..c5e5a888c607
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h
@@ -0,0 +1,33 @@
1#ifndef __NVKM_FB_NV50_H__
2#define __NVKM_FB_NV50_H__
3
4#include "priv.h"
5
6struct nv50_fb_priv {
7 struct nouveau_fb base;
8 struct page *r100c08_page;
9 dma_addr_t r100c08;
10};
11
12int nv50_fb_ctor(struct nouveau_object *, struct nouveau_object *,
13 struct nouveau_oclass *, void *, u32,
14 struct nouveau_object **);
15void nv50_fb_dtor(struct nouveau_object *);
16int nv50_fb_init(struct nouveau_object *);
17
18struct nv50_fb_impl {
19 struct nouveau_fb_impl base;
20 u32 trap;
21};
22
23#define nv50_ram_create(p,e,o,d) \
24 nv50_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
25int nv50_ram_create_(struct nouveau_object *, struct nouveau_object *,
26 struct nouveau_oclass *, int, void **);
27int nv50_ram_get(struct nouveau_fb *, u64 size, u32 align, u32 ncmin,
28 u32 memtype, struct nouveau_mem **);
29void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **);
30void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
31extern int nv50_fb_memtype[0x80];
32
33#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c
new file mode 100644
index 000000000000..cf0e767d3833
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv50.h"
26
27struct nouveau_oclass *
28nv84_fb_oclass = &(struct nv50_fb_impl) {
29 .base.base.handle = NV_SUBDEV(FB, 0x84),
30 .base.base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nv50_fb_ctor,
32 .dtor = nv50_fb_dtor,
33 .init = nv50_fb_init,
34 .fini = _nouveau_fb_fini,
35 },
36 .base.memtype = nv50_fb_memtype_valid,
37 .base.ram = &nv50_ram_oclass,
38 .trap = 0x001d07ff,
39}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c
new file mode 100644
index 000000000000..dab6e1c63d48
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv50.h"
26
27struct nouveau_oclass *
28nva3_fb_oclass = &(struct nv50_fb_impl) {
29 .base.base.handle = NV_SUBDEV(FB, 0xa3),
30 .base.base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nv50_fb_ctor,
32 .dtor = nv50_fb_dtor,
33 .init = nv50_fb_init,
34 .fini = _nouveau_fb_fini,
35 },
36 .base.memtype = nv50_fb_memtype_valid,
37 .base.ram = &nva3_ram_oclass,
38 .trap = 0x000d0fff,
39}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c
new file mode 100644
index 000000000000..cba8e6818035
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv50.h"
26
27struct nouveau_oclass *
28nvaa_fb_oclass = &(struct nv50_fb_impl) {
29 .base.base.handle = NV_SUBDEV(FB, 0xaa),
30 .base.base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nv50_fb_ctor,
32 .dtor = nv50_fb_dtor,
33 .init = nv50_fb_init,
34 .fini = _nouveau_fb_fini,
35 },
36 .base.memtype = nv50_fb_memtype_valid,
37 .base.ram = &nvaa_ram_oclass,
38 .trap = 0x001d07ff,
39}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c
new file mode 100644
index 000000000000..5423faa2c09b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv50.h"
26
27struct nouveau_oclass *
28nvaf_fb_oclass = &(struct nv50_fb_impl) {
29 .base.base.handle = NV_SUBDEV(FB, 0xaf),
30 .base.base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nv50_fb_ctor,
32 .dtor = nv50_fb_dtor,
33 .init = nv50_fb_init,
34 .fini = _nouveau_fb_fini,
35 },
36 .base.memtype = nv50_fb_memtype_valid,
37 .base.ram = &nvaa_ram_oclass,
38 .trap = 0x089d1fff,
39}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index f35d76fd746d..e5fc37c4caac 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -22,24 +22,18 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "priv.h" 25#include "nvc0.h"
26
27struct nvc0_fb_priv {
28 struct nouveau_fb base;
29 struct page *r100c10_page;
30 dma_addr_t r100c10;
31};
32 26
33extern const u8 nvc0_pte_storage_type_map[256]; 27extern const u8 nvc0_pte_storage_type_map[256];
34 28
35static bool 29bool
36nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags) 30nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
37{ 31{
38 u8 memtype = (tile_flags & 0x0000ff00) >> 8; 32 u8 memtype = (tile_flags & 0x0000ff00) >> 8;
39 return likely((nvc0_pte_storage_type_map[memtype] != 0xff)); 33 return likely((nvc0_pte_storage_type_map[memtype] != 0xff));
40} 34}
41 35
42static int 36int
43nvc0_fb_init(struct nouveau_object *object) 37nvc0_fb_init(struct nouveau_object *object)
44{ 38{
45 struct nvc0_fb_priv *priv = (void *)object; 39 struct nvc0_fb_priv *priv = (void *)object;
@@ -54,7 +48,7 @@ nvc0_fb_init(struct nouveau_object *object)
54 return 0; 48 return 0;
55} 49}
56 50
57static void 51void
58nvc0_fb_dtor(struct nouveau_object *object) 52nvc0_fb_dtor(struct nouveau_object *object)
59{ 53{
60 struct nouveau_device *device = nv_device(object); 54 struct nouveau_device *device = nv_device(object);
@@ -69,7 +63,7 @@ nvc0_fb_dtor(struct nouveau_object *object)
69 nouveau_fb_destroy(&priv->base); 63 nouveau_fb_destroy(&priv->base);
70} 64}
71 65
72static int 66int
73nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 67nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
74 struct nouveau_oclass *oclass, void *data, u32 size, 68 struct nouveau_oclass *oclass, void *data, u32 size,
75 struct nouveau_object **pobject) 69 struct nouveau_object **pobject)
@@ -78,13 +72,11 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
78 struct nvc0_fb_priv *priv; 72 struct nvc0_fb_priv *priv;
79 int ret; 73 int ret;
80 74
81 ret = nouveau_fb_create(parent, engine, oclass, &nvc0_ram_oclass, &priv); 75 ret = nouveau_fb_create(parent, engine, oclass, &priv);
82 *pobject = nv_object(priv); 76 *pobject = nv_object(priv);
83 if (ret) 77 if (ret)
84 return ret; 78 return ret;
85 79
86 priv->base.memtype_valid = nvc0_fb_memtype_valid;
87
88 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 80 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
89 if (priv->r100c10_page) { 81 if (priv->r100c10_page) {
90 priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page, 82 priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page,
@@ -97,14 +89,15 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
97 return 0; 89 return 0;
98} 90}
99 91
100 92struct nouveau_oclass *
101struct nouveau_oclass 93nvc0_fb_oclass = &(struct nouveau_fb_impl) {
102nvc0_fb_oclass = { 94 .base.handle = NV_SUBDEV(FB, 0xc0),
103 .handle = NV_SUBDEV(FB, 0xc0), 95 .base.ofuncs = &(struct nouveau_ofuncs) {
104 .ofuncs = &(struct nouveau_ofuncs) {
105 .ctor = nvc0_fb_ctor, 96 .ctor = nvc0_fb_ctor,
106 .dtor = nvc0_fb_dtor, 97 .dtor = nvc0_fb_dtor,
107 .init = nvc0_fb_init, 98 .init = nvc0_fb_init,
108 .fini = _nouveau_fb_fini, 99 .fini = _nouveau_fb_fini,
109 }, 100 },
110}; 101 .memtype = nvc0_fb_memtype_valid,
102 .ram = &nvc0_ram_oclass,
103}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
new file mode 100644
index 000000000000..9e1931eb746f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
@@ -0,0 +1,29 @@
1#ifndef __NVKM_RAM_NVC0_H__
2#define __NVKM_RAM_NVC0_H__
3
4#include "priv.h"
5#include "nv50.h"
6
7struct nvc0_fb_priv {
8 struct nouveau_fb base;
9 struct page *r100c10_page;
10 dma_addr_t r100c10;
11};
12
13int nvc0_fb_ctor(struct nouveau_object *, struct nouveau_object *,
14 struct nouveau_oclass *, void *, u32,
15 struct nouveau_object **);
16void nvc0_fb_dtor(struct nouveau_object *);
17int nvc0_fb_init(struct nouveau_object *);
18bool nvc0_fb_memtype_valid(struct nouveau_fb *, u32);
19
20
21#define nvc0_ram_create(p,e,o,d) \
22 nvc0_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
23int nvc0_ram_create_(struct nouveau_object *, struct nouveau_object *,
24 struct nouveau_oclass *, int, void **);
25int nvc0_ram_get(struct nouveau_fb *, u64, u32, u32, u32,
26 struct nouveau_mem **);
27void nvc0_ram_put(struct nouveau_fb *, struct nouveau_mem **);
28
29#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c
new file mode 100644
index 000000000000..595db50cfef3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26
27struct nouveau_oclass *
28nve0_fb_oclass = &(struct nouveau_fb_impl) {
29 .base.handle = NV_SUBDEV(FB, 0xe0),
30 .base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nvc0_fb_ctor,
32 .dtor = nvc0_fb_dtor,
33 .init = nvc0_fb_init,
34 .fini = _nouveau_fb_fini,
35 },
36 .memtype = nvc0_fb_memtype_valid,
37 .ram = &nve0_ram_oclass,
38}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index db9d6ddde52c..493125214e88 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -12,6 +12,8 @@
12#define nouveau_ram_fini(p,s) \ 12#define nouveau_ram_fini(p,s) \
13 nouveau_object_fini(&(p)->base, (s)) 13 nouveau_object_fini(&(p)->base, (s))
14 14
15#define nouveau_ram_create_(p,e,o,s,d) \
16 nouveau_object_create_((p), (e), (o), 0, (s), (void **)d)
15#define _nouveau_ram_dtor nouveau_object_destroy 17#define _nouveau_ram_dtor nouveau_object_destroy
16#define _nouveau_ram_init nouveau_object_init 18#define _nouveau_ram_init nouveau_object_init
17#define _nouveau_ram_fini nouveau_object_fini 19#define _nouveau_ram_fini nouveau_object_fini
@@ -26,10 +28,16 @@ extern struct nouveau_oclass nv44_ram_oclass;
26extern struct nouveau_oclass nv49_ram_oclass; 28extern struct nouveau_oclass nv49_ram_oclass;
27extern struct nouveau_oclass nv4e_ram_oclass; 29extern struct nouveau_oclass nv4e_ram_oclass;
28extern struct nouveau_oclass nv50_ram_oclass; 30extern struct nouveau_oclass nv50_ram_oclass;
31extern struct nouveau_oclass nva3_ram_oclass;
32extern struct nouveau_oclass nvaa_ram_oclass;
29extern struct nouveau_oclass nvc0_ram_oclass; 33extern struct nouveau_oclass nvc0_ram_oclass;
34extern struct nouveau_oclass nve0_ram_oclass;
30 35
31#define nouveau_fb_create(p,e,c,r,d) \ 36int nouveau_sddr3_calc(struct nouveau_ram *ram);
32 nouveau_fb_create_((p), (e), (c), (r), sizeof(**d), (void **)d) 37int nouveau_gddr5_calc(struct nouveau_ram *ram);
38
39#define nouveau_fb_create(p,e,c,d) \
40 nouveau_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
33#define nouveau_fb_destroy(p) ({ \ 41#define nouveau_fb_destroy(p) ({ \
34 struct nouveau_fb *pfb = (p); \ 42 struct nouveau_fb *pfb = (p); \
35 _nouveau_fb_dtor(nv_object(pfb)); \ 43 _nouveau_fb_dtor(nv_object(pfb)); \
@@ -44,44 +52,21 @@ extern struct nouveau_oclass nvc0_ram_oclass;
44}) 52})
45 53
46int nouveau_fb_create_(struct nouveau_object *, struct nouveau_object *, 54int nouveau_fb_create_(struct nouveau_object *, struct nouveau_object *,
47 struct nouveau_oclass *, struct nouveau_oclass *, 55 struct nouveau_oclass *, int, void **);
48 int length, void **pobject);
49void _nouveau_fb_dtor(struct nouveau_object *); 56void _nouveau_fb_dtor(struct nouveau_object *);
50int _nouveau_fb_init(struct nouveau_object *); 57int _nouveau_fb_init(struct nouveau_object *);
51int _nouveau_fb_fini(struct nouveau_object *, bool); 58int _nouveau_fb_fini(struct nouveau_object *, bool);
52 59
53struct nouveau_bios; 60struct nouveau_fb_impl {
54int nouveau_fb_bios_memtype(struct nouveau_bios *); 61 struct nouveau_oclass base;
62 struct nouveau_oclass *ram;
63 bool (*memtype)(struct nouveau_fb *, u32);
64};
55 65
56bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype); 66bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
67bool nv50_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
57 68
58void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, 69struct nouveau_bios;
59 u32 pitch, u32 flags, struct nouveau_fb_tile *); 70int nouveau_fb_bios_memtype(struct nouveau_bios *);
60void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
61void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
62
63void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
64 u32 pitch, u32 flags, struct nouveau_fb_tile *);
65void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
66void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
67
68int nv30_fb_init(struct nouveau_object *);
69void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
70 u32 pitch, u32 flags, struct nouveau_fb_tile *);
71
72void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
73 struct nouveau_fb_tile *);
74
75int nv41_fb_init(struct nouveau_object *);
76void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
77
78int nv44_fb_init(struct nouveau_object *);
79void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
80
81void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
82 u32 pitch, u32 flags, struct nouveau_fb_tile *);
83
84void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
85extern int nv50_fb_memtype[0x80];
86 71
87#endif 72#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
new file mode 100644
index 000000000000..0f57fcfe0bbf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -0,0 +1,118 @@
1#ifndef __NVKM_FBRAM_FUC_H__
2#define __NVKM_FBRAM_FUC_H__
3
4#include <subdev/pwr.h>
5
6struct ramfuc {
7 struct nouveau_memx *memx;
8 struct nouveau_fb *pfb;
9 int sequence;
10};
11
12struct ramfuc_reg {
13 int sequence;
14 bool force;
15 u32 addr[2];
16 u32 data;
17};
18
19static inline struct ramfuc_reg
20ramfuc_reg2(u32 addr1, u32 addr2)
21{
22 return (struct ramfuc_reg) {
23 .sequence = 0,
24 .addr = { addr1, addr2 },
25 .data = 0xdeadbeef,
26 };
27}
28
29static inline struct ramfuc_reg
30ramfuc_reg(u32 addr)
31{
32 return ramfuc_reg2(addr, addr);
33}
34
35static inline int
36ramfuc_init(struct ramfuc *ram, struct nouveau_fb *pfb)
37{
38 struct nouveau_pwr *ppwr = nouveau_pwr(pfb);
39 int ret;
40
41 ret = nouveau_memx_init(ppwr, &ram->memx);
42 if (ret)
43 return ret;
44
45 ram->sequence++;
46 ram->pfb = pfb;
47 return 0;
48}
49
50static inline int
51ramfuc_exec(struct ramfuc *ram, bool exec)
52{
53 int ret = 0;
54 if (ram->pfb) {
55 ret = nouveau_memx_fini(&ram->memx, exec);
56 ram->pfb = NULL;
57 }
58 return ret;
59}
60
61static inline u32
62ramfuc_rd32(struct ramfuc *ram, struct ramfuc_reg *reg)
63{
64 if (reg->sequence != ram->sequence)
65 reg->data = nv_rd32(ram->pfb, reg->addr[0]);
66 return reg->data;
67}
68
69static inline void
70ramfuc_wr32(struct ramfuc *ram, struct ramfuc_reg *reg, u32 data)
71{
72 reg->sequence = ram->sequence;
73 reg->data = data;
74 if (reg->addr[0] != reg->addr[1])
75 nouveau_memx_wr32(ram->memx, reg->addr[1], reg->data);
76 nouveau_memx_wr32(ram->memx, reg->addr[0], reg->data);
77}
78
79static inline void
80ramfuc_nuke(struct ramfuc *ram, struct ramfuc_reg *reg)
81{
82 reg->force = true;
83}
84
85static inline u32
86ramfuc_mask(struct ramfuc *ram, struct ramfuc_reg *reg, u32 mask, u32 data)
87{
88 u32 temp = ramfuc_rd32(ram, reg);
89 if (temp != ((temp & ~mask) | data) || reg->force) {
90 ramfuc_wr32(ram, reg, (temp & ~mask) | data);
91 reg->force = false;
92 }
93 return temp;
94}
95
96static inline void
97ramfuc_wait(struct ramfuc *ram, u32 addr, u32 mask, u32 data, u32 nsec)
98{
99 nouveau_memx_wait(ram->memx, addr, mask, data, nsec);
100}
101
102static inline void
103ramfuc_nsec(struct ramfuc *ram, u32 nsec)
104{
105 nouveau_memx_nsec(ram->memx, nsec);
106}
107
108#define ram_init(s,p) ramfuc_init(&(s)->base, (p))
109#define ram_exec(s,e) ramfuc_exec(&(s)->base, (e))
110#define ram_have(s,r) ((s)->r_##r.addr != 0x000000)
111#define ram_rd32(s,r) ramfuc_rd32(&(s)->base, &(s)->r_##r)
112#define ram_wr32(s,r,d) ramfuc_wr32(&(s)->base, &(s)->r_##r, (d))
113#define ram_nuke(s,r) ramfuc_nuke(&(s)->base, &(s)->r_##r)
114#define ram_mask(s,r,m,d) ramfuc_mask(&(s)->base, &(s)->r_##r, (m), (d))
115#define ram_wait(s,r,m,d,n) ramfuc_wait(&(s)->base, (r), (m), (d), (n))
116#define ram_nsec(s,n) ramfuc_nsec(&(s)->base, (n))
117
118#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
index ee49ac4dbdb6..7648beb11199 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
@@ -22,7 +22,154 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "priv.h" 25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/pll.h>
28#include <subdev/bios/init.h>
29#include <subdev/clock.h>
30#include <subdev/clock/pll.h>
31#include <subdev/timer.h>
32
33#include <engine/fifo.h>
34
35#include "nv40.h"
36
37int
38nv40_ram_calc(struct nouveau_fb *pfb, u32 freq)
39{
40 struct nouveau_bios *bios = nouveau_bios(pfb);
41 struct nv40_ram *ram = (void *)pfb->ram;
42 struct nvbios_pll pll;
43 int N1, M1, N2, M2;
44 int log2P, ret;
45
46 ret = nvbios_pll_parse(bios, 0x04, &pll);
47 if (ret) {
48 nv_error(pfb, "mclk pll data not found\n");
49 return ret;
50 }
51
52 ret = nv04_pll_calc(nv_subdev(pfb), &pll, freq,
53 &N1, &M1, &N2, &M2, &log2P);
54 if (ret < 0)
55 return ret;
56
57 ram->ctrl = 0x80000000 | (log2P << 16);
58 ram->ctrl |= min(pll.bias_p + log2P, (int)pll.max_p) << 20;
59 if (N2 == M2) {
60 ram->ctrl |= 0x00000100;
61 ram->coef = (N1 << 8) | M1;
62 } else {
63 ram->ctrl |= 0x40000000;
64 ram->coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
65 }
66
67 return 0;
68}
69
70int
71nv40_ram_prog(struct nouveau_fb *pfb)
72{
73 struct nouveau_bios *bios = nouveau_bios(pfb);
74 struct nv40_ram *ram = (void *)pfb->ram;
75 struct bit_entry M;
76 u32 crtc_mask = 0;
77 u8 sr1[2];
78 int i;
79
80 /* determine which CRTCs are active, fetch VGA_SR1 for each */
81 for (i = 0; i < 2; i++) {
82 u32 vbl = nv_rd32(pfb, 0x600808 + (i * 0x2000));
83 u32 cnt = 0;
84 do {
85 if (vbl != nv_rd32(pfb, 0x600808 + (i * 0x2000))) {
86 nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
87 sr1[i] = nv_rd08(pfb, 0x0c03c5 + (i * 0x2000));
88 if (!(sr1[i] & 0x20))
89 crtc_mask |= (1 << i);
90 break;
91 }
92 udelay(1);
93 } while (cnt++ < 32);
94 }
95
96 /* wait for vblank start on active crtcs, disable memory access */
97 for (i = 0; i < 2; i++) {
98 if (!(crtc_mask & (1 << i)))
99 continue;
100 nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
101 nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
102 nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
103 nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
104 }
105
106 /* prepare ram for reclocking */
107 nv_wr32(pfb, 0x1002d4, 0x00000001); /* precharge */
108 nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
109 nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
110 nv_mask(pfb, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
111 nv_wr32(pfb, 0x1002dc, 0x00000001); /* enable self-refresh */
112
113 /* change the PLL of each memory partition */
114 nv_mask(pfb, 0x00c040, 0x0000c000, 0x00000000);
115 switch (nv_device(pfb)->chipset) {
116 case 0x40:
117 case 0x45:
118 case 0x41:
119 case 0x42:
120 case 0x47:
121 nv_mask(pfb, 0x004044, 0xc0771100, ram->ctrl);
122 nv_mask(pfb, 0x00402c, 0xc0771100, ram->ctrl);
123 nv_wr32(pfb, 0x004048, ram->coef);
124 nv_wr32(pfb, 0x004030, ram->coef);
125 case 0x43:
126 case 0x49:
127 case 0x4b:
128 nv_mask(pfb, 0x004038, 0xc0771100, ram->ctrl);
129 nv_wr32(pfb, 0x00403c, ram->coef);
130 default:
131 nv_mask(pfb, 0x004020, 0xc0771100, ram->ctrl);
132 nv_wr32(pfb, 0x004024, ram->coef);
133 break;
134 }
135 udelay(100);
136 nv_mask(pfb, 0x00c040, 0x0000c000, 0x0000c000);
137
138 /* re-enable normal operation of memory controller */
139 nv_wr32(pfb, 0x1002dc, 0x00000000);
140 nv_mask(pfb, 0x100210, 0x80000000, 0x80000000);
141 udelay(100);
142
143 /* execute memory reset script from vbios */
144 if (!bit_entry(bios, 'M', &M)) {
145 struct nvbios_init init = {
146 .subdev = nv_subdev(pfb),
147 .bios = bios,
148 .offset = nv_ro16(bios, M.offset + 0x00),
149 .execute = 1,
150 };
151
152 nvbios_exec(&init);
153 }
154
155 /* make sure we're in vblank (hopefully the same one as before), and
156 * then re-enable crtc memory access
157 */
158 for (i = 0; i < 2; i++) {
159 if (!(crtc_mask & (1 << i)))
160 continue;
161 nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
162 nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
163 nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i]);
164 }
165
166 return 0;
167}
168
169void
170nv40_ram_tidy(struct nouveau_fb *pfb)
171{
172}
26 173
27static int 174static int
28nv40_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, 175nv40_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +177,7 @@ nv40_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
30 struct nouveau_object **pobject) 177 struct nouveau_object **pobject)
31{ 178{
32 struct nouveau_fb *pfb = nouveau_fb(parent); 179 struct nouveau_fb *pfb = nouveau_fb(parent);
33 struct nouveau_ram *ram; 180 struct nv40_ram *ram;
34 u32 pbus1218 = nv_rd32(pfb, 0x001218); 181 u32 pbus1218 = nv_rd32(pfb, 0x001218);
35 int ret; 182 int ret;
36 183
@@ -40,15 +187,18 @@ nv40_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
40 return ret; 187 return ret;
41 188
42 switch (pbus1218 & 0x00000300) { 189 switch (pbus1218 & 0x00000300) {
43 case 0x00000000: ram->type = NV_MEM_TYPE_SDRAM; break; 190 case 0x00000000: ram->base.type = NV_MEM_TYPE_SDRAM; break;
44 case 0x00000100: ram->type = NV_MEM_TYPE_DDR1; break; 191 case 0x00000100: ram->base.type = NV_MEM_TYPE_DDR1; break;
45 case 0x00000200: ram->type = NV_MEM_TYPE_GDDR3; break; 192 case 0x00000200: ram->base.type = NV_MEM_TYPE_GDDR3; break;
46 case 0x00000300: ram->type = NV_MEM_TYPE_DDR2; break; 193 case 0x00000300: ram->base.type = NV_MEM_TYPE_DDR2; break;
47 } 194 }
48 195
49 ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; 196 ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
50 ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; 197 ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
51 ram->tags = nv_rd32(pfb, 0x100320); 198 ram->base.tags = nv_rd32(pfb, 0x100320);
199 ram->base.calc = nv40_ram_calc;
200 ram->base.prog = nv40_ram_prog;
201 ram->base.tidy = nv40_ram_tidy;
52 return 0; 202 return 0;
53} 203}
54 204
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
index 1dab7e12abab..d64498a4d9ee 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
@@ -22,7 +22,7 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "priv.h" 25#include "nv40.h"
26 26
27static int 27static int
28nv41_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, 28nv41_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@ nv41_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
30 struct nouveau_object **pobject) 30 struct nouveau_object **pobject)
31{ 31{
32 struct nouveau_fb *pfb = nouveau_fb(parent); 32 struct nouveau_fb *pfb = nouveau_fb(parent);
33 struct nouveau_ram *ram; 33 struct nv40_ram *ram;
34 u32 pfb474 = nv_rd32(pfb, 0x100474); 34 u32 pfb474 = nv_rd32(pfb, 0x100474);
35 int ret; 35 int ret;
36 36
@@ -40,15 +40,18 @@ nv41_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
40 return ret; 40 return ret;
41 41
42 if (pfb474 & 0x00000004) 42 if (pfb474 & 0x00000004)
43 ram->type = NV_MEM_TYPE_GDDR3; 43 ram->base.type = NV_MEM_TYPE_GDDR3;
44 if (pfb474 & 0x00000002) 44 if (pfb474 & 0x00000002)
45 ram->type = NV_MEM_TYPE_DDR2; 45 ram->base.type = NV_MEM_TYPE_DDR2;
46 if (pfb474 & 0x00000001) 46 if (pfb474 & 0x00000001)
47 ram->type = NV_MEM_TYPE_DDR1; 47 ram->base.type = NV_MEM_TYPE_DDR1;
48 48
49 ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; 49 ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
50 ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; 50 ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
51 ram->tags = nv_rd32(pfb, 0x100320); 51 ram->base.tags = nv_rd32(pfb, 0x100320);
52 ram->base.calc = nv40_ram_calc;
53 ram->base.prog = nv40_ram_prog;
54 ram->base.tidy = nv40_ram_tidy;
52 return 0; 55 return 0;
53} 56}
54 57
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
index 25fff842e5c1..089acac810c5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
@@ -22,7 +22,7 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "priv.h" 25#include "nv40.h"
26 26
27static int 27static int
28nv44_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, 28nv44_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@ nv44_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
30 struct nouveau_object **pobject) 30 struct nouveau_object **pobject)
31{ 31{
32 struct nouveau_fb *pfb = nouveau_fb(parent); 32 struct nouveau_fb *pfb = nouveau_fb(parent);
33 struct nouveau_ram *ram; 33 struct nv40_ram *ram;
34 u32 pfb474 = nv_rd32(pfb, 0x100474); 34 u32 pfb474 = nv_rd32(pfb, 0x100474);
35 int ret; 35 int ret;
36 36
@@ -40,13 +40,16 @@ nv44_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
40 return ret; 40 return ret;
41 41
42 if (pfb474 & 0x00000004) 42 if (pfb474 & 0x00000004)
43 ram->type = NV_MEM_TYPE_GDDR3; 43 ram->base.type = NV_MEM_TYPE_GDDR3;
44 if (pfb474 & 0x00000002) 44 if (pfb474 & 0x00000002)
45 ram->type = NV_MEM_TYPE_DDR2; 45 ram->base.type = NV_MEM_TYPE_DDR2;
46 if (pfb474 & 0x00000001) 46 if (pfb474 & 0x00000001)
47 ram->type = NV_MEM_TYPE_DDR1; 47 ram->base.type = NV_MEM_TYPE_DDR1;
48 48
49 ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; 49 ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
50 ram->base.calc = nv40_ram_calc;
51 ram->base.prog = nv40_ram_prog;
52 ram->base.tidy = nv40_ram_tidy;
50 return 0; 53 return 0;
51} 54}
52 55
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
index ab7ef0ac9e34..baa013afa57b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
@@ -22,7 +22,7 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "priv.h" 25#include "nv40.h"
26 26
27static int 27static int
28nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, 28nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
30 struct nouveau_object **pobject) 30 struct nouveau_object **pobject)
31{ 31{
32 struct nouveau_fb *pfb = nouveau_fb(parent); 32 struct nouveau_fb *pfb = nouveau_fb(parent);
33 struct nouveau_ram *ram; 33 struct nv40_ram *ram;
34 u32 pfb914 = nv_rd32(pfb, 0x100914); 34 u32 pfb914 = nv_rd32(pfb, 0x100914);
35 int ret; 35 int ret;
36 36
@@ -40,15 +40,18 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
40 return ret; 40 return ret;
41 41
42 switch (pfb914 & 0x00000003) { 42 switch (pfb914 & 0x00000003) {
43 case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break; 43 case 0x00000000: ram->base.type = NV_MEM_TYPE_DDR1; break;
44 case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break; 44 case 0x00000001: ram->base.type = NV_MEM_TYPE_DDR2; break;
45 case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break; 45 case 0x00000002: ram->base.type = NV_MEM_TYPE_GDDR3; break;
46 case 0x00000003: break; 46 case 0x00000003: break;
47 } 47 }
48 48
49 ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; 49 ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
50 ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; 50 ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
51 ram->tags = nv_rd32(pfb, 0x100320); 51 ram->base.tags = nv_rd32(pfb, 0x100320);
52 ram->base.calc = nv40_ram_calc;
53 ram->base.prog = nv40_ram_prog;
54 ram->base.tidy = nv40_ram_tidy;
52 return 0; 55 return 0;
53} 56}
54 57
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index 903baff77fdd..76762a17d89c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -23,8 +23,215 @@
23 */ 23 */
24 24
25#include <subdev/bios.h> 25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/pll.h>
28#include <subdev/bios/perf.h>
29#include <subdev/bios/timing.h>
30#include <subdev/clock/pll.h>
31#include <subdev/fb.h>
32
33#include <core/option.h>
26#include <core/mm.h> 34#include <core/mm.h>
27#include "priv.h" 35
36#include "ramseq.h"
37
38#include "nv50.h"
39
40struct nv50_ramseq {
41 struct hwsq base;
42 struct hwsq_reg r_0x002504;
43 struct hwsq_reg r_0x004008;
44 struct hwsq_reg r_0x00400c;
45 struct hwsq_reg r_0x00c040;
46 struct hwsq_reg r_0x100210;
47 struct hwsq_reg r_0x1002d0;
48 struct hwsq_reg r_0x1002d4;
49 struct hwsq_reg r_0x1002dc;
50 struct hwsq_reg r_0x100da0[8];
51 struct hwsq_reg r_0x100e20;
52 struct hwsq_reg r_0x100e24;
53 struct hwsq_reg r_0x611200;
54 struct hwsq_reg r_timing[9];
55 struct hwsq_reg r_mr[4];
56};
57
58struct nv50_ram {
59 struct nouveau_ram base;
60 struct nv50_ramseq hwsq;
61};
62
63#define QFX5800NVA0 1
64
65static int
66nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
67{
68 struct nouveau_bios *bios = nouveau_bios(pfb);
69 struct nv50_ram *ram = (void *)pfb->ram;
70 struct nv50_ramseq *hwsq = &ram->hwsq;
71 struct nvbios_perfE perfE;
72 struct nvbios_pll mpll;
73 struct bit_entry M;
74 struct {
75 u32 data;
76 u8 size;
77 } ramcfg, timing;
78 u8 ver, hdr, cnt, strap;
79 u32 data;
80 int N1, M1, N2, M2, P;
81 int ret, i;
82
83 /* lookup closest matching performance table entry for frequency */
84 i = 0;
85 do {
86 ramcfg.data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
87 &ramcfg.size, &perfE);
88 if (!ramcfg.data || (ver < 0x25 || ver >= 0x40) ||
89 (ramcfg.size < 2)) {
90 nv_error(pfb, "invalid/missing perftab entry\n");
91 return -EINVAL;
92 }
93 } while (perfE.memory < freq);
94
95 /* locate specific data set for the attached memory */
96 if (bit_entry(bios, 'M', &M) || M.version != 1 || M.length < 5) {
97 nv_error(pfb, "invalid/missing memory table\n");
98 return -EINVAL;
99 }
100
101 strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
102 data = nv_ro16(bios, M.offset + 3);
103 if (data)
104 strap = nv_ro08(bios, data + strap);
105
106 if (strap >= cnt) {
107 nv_error(pfb, "invalid ramcfg strap\n");
108 return -EINVAL;
109 }
110
111 ramcfg.data += hdr + (strap * ramcfg.size);
112
113 /* lookup memory timings, if bios says they're present */
114 strap = nv_ro08(bios, ramcfg.data + 0x01);
115 if (strap != 0xff) {
116 timing.data = nvbios_timing_entry(bios, strap, &ver, &hdr);
117 if (!timing.data || ver != 0x10 || hdr < 0x12) {
118 nv_error(pfb, "invalid/missing timing entry "
119 "%02x %04x %02x %02x\n",
120 strap, timing.data, ver, hdr);
121 return -EINVAL;
122 }
123 } else {
124 timing.data = 0;
125 }
126
127 ret = ram_init(hwsq, nv_subdev(pfb));
128 if (ret)
129 return ret;
130
131 ram_wait(hwsq, 0x01, 0x00); /* wait for !vblank */
132 ram_wait(hwsq, 0x01, 0x01); /* wait for vblank */
133 ram_wr32(hwsq, 0x611200, 0x00003300);
134 ram_wr32(hwsq, 0x002504, 0x00000001); /* block fifo */
135 ram_nsec(hwsq, 8000);
136 ram_setf(hwsq, 0x10, 0x00); /* disable fb */
137 ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
138
139 ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
140 ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
141 ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
142 ram_wr32(hwsq, 0x100210, 0x00000000); /* disable auto-refresh */
143 ram_wr32(hwsq, 0x1002dc, 0x00000001); /* enable self-refresh */
144
145 ret = nvbios_pll_parse(bios, 0x004008, &mpll);
146 mpll.vco2.max_freq = 0;
147 if (ret == 0) {
148 ret = nv04_pll_calc(nv_subdev(pfb), &mpll, freq,
149 &N1, &M1, &N2, &M2, &P);
150 if (ret == 0)
151 ret = -EINVAL;
152 }
153
154 if (ret < 0)
155 return ret;
156
157 ram_mask(hwsq, 0x00c040, 0xc000c000, 0x0000c000);
158 ram_mask(hwsq, 0x004008, 0x00000200, 0x00000200);
159 ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
160 ram_mask(hwsq, 0x004008, 0x81ff0000, 0x80000000 | (mpll.bias_p << 19) |
161 (P << 22) | (P << 16));
162#if QFX5800NVA0
163 for (i = 0; i < 8; i++)
164 ram_mask(hwsq, 0x100da0[i], 0x00000000, 0x00000000); /*XXX*/
165#endif
166 ram_nsec(hwsq, 96000); /*XXX*/
167 ram_mask(hwsq, 0x004008, 0x00002200, 0x00002000);
168
169 ram_wr32(hwsq, 0x1002dc, 0x00000000); /* disable self-refresh */
170 ram_wr32(hwsq, 0x100210, 0x80000000); /* enable auto-refresh */
171
172 ram_nsec(hwsq, 12000);
173
174 switch (ram->base.type) {
175 case NV_MEM_TYPE_DDR2:
176 ram_nuke(hwsq, mr[0]); /* force update */
177 ram_mask(hwsq, mr[0], 0x000, 0x000);
178 break;
179 case NV_MEM_TYPE_GDDR3:
180 ram_mask(hwsq, mr[2], 0x000, 0x000);
181 ram_nuke(hwsq, mr[0]); /* force update */
182 ram_mask(hwsq, mr[0], 0x000, 0x000);
183 break;
184 default:
185 break;
186 }
187
188 ram_mask(hwsq, timing[3], 0x00000000, 0x00000000); /*XXX*/
189 ram_mask(hwsq, timing[1], 0x00000000, 0x00000000); /*XXX*/
190 ram_mask(hwsq, timing[6], 0x00000000, 0x00000000); /*XXX*/
191 ram_mask(hwsq, timing[7], 0x00000000, 0x00000000); /*XXX*/
192 ram_mask(hwsq, timing[8], 0x00000000, 0x00000000); /*XXX*/
193 ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
194 ram_mask(hwsq, timing[2], 0x00000000, 0x00000000); /*XXX*/
195 ram_mask(hwsq, timing[4], 0x00000000, 0x00000000); /*XXX*/
196 ram_mask(hwsq, timing[5], 0x00000000, 0x00000000); /*XXX*/
197
198 ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
199
200#if QFX5800NVA0
201 ram_nuke(hwsq, 0x100e24);
202 ram_mask(hwsq, 0x100e24, 0x00000000, 0x00000000);
203 ram_nuke(hwsq, 0x100e20);
204 ram_mask(hwsq, 0x100e20, 0x00000000, 0x00000000);
205#endif
206
207 ram_mask(hwsq, mr[0], 0x100, 0x100);
208 ram_mask(hwsq, mr[0], 0x100, 0x000);
209
210 ram_setf(hwsq, 0x10, 0x01); /* enable fb */
211 ram_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
212 ram_wr32(hwsq, 0x611200, 0x00003330);
213 ram_wr32(hwsq, 0x002504, 0x00000000); /* un-block fifo */
214 return 0;
215}
216
217static int
218nv50_ram_prog(struct nouveau_fb *pfb)
219{
220 struct nouveau_device *device = nv_device(pfb);
221 struct nv50_ram *ram = (void *)pfb->ram;
222 struct nv50_ramseq *hwsq = &ram->hwsq;
223
224 ram_exec(hwsq, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
225 return 0;
226}
227
228static void
229nv50_ram_tidy(struct nouveau_fb *pfb)
230{
231 struct nv50_ram *ram = (void *)pfb->ram;
232 struct nv50_ramseq *hwsq = &ram->hwsq;
233 ram_exec(hwsq, false);
234}
28 235
29void 236void
30__nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem) 237__nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem)
@@ -57,7 +264,7 @@ nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
57 kfree(mem); 264 kfree(mem);
58} 265}
59 266
60static int 267int
61nv50_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin, 268nv50_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
62 u32 memtype, struct nouveau_mem **pmem) 269 u32 memtype, struct nouveau_mem **pmem)
63{ 270{
@@ -160,77 +367,114 @@ nv50_fb_vram_rblock(struct nouveau_fb *pfb, struct nouveau_ram *ram)
160 return rblock_size; 367 return rblock_size;
161} 368}
162 369
163static int 370int
164nv50_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, 371nv50_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
165 struct nouveau_oclass *oclass, void *data, u32 datasize, 372 struct nouveau_oclass *oclass, int length, void **pobject)
166 struct nouveau_object **pobject)
167{ 373{
168 struct nouveau_fb *pfb = nouveau_fb(parent);
169 struct nouveau_device *device = nv_device(pfb);
170 struct nouveau_bios *bios = nouveau_bios(device);
171 struct nouveau_ram *ram;
172 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ 374 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
173 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ 375 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
174 u32 size; 376 struct nouveau_bios *bios = nouveau_bios(parent);
377 struct nouveau_fb *pfb = nouveau_fb(parent);
378 struct nouveau_ram *ram;
175 int ret; 379 int ret;
176 380
177 ret = nouveau_ram_create(parent, engine, oclass, &ram); 381 ret = nouveau_ram_create_(parent, engine, oclass, length, pobject);
178 *pobject = nv_object(ram); 382 ram = *pobject;
179 if (ret) 383 if (ret)
180 return ret; 384 return ret;
181 385
182 ram->size = nv_rd32(pfb, 0x10020c); 386 ram->size = nv_rd32(pfb, 0x10020c);
183 ram->size = (ram->size & 0xffffff00) | 387 ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
184 ((ram->size & 0x000000ff) << 32);
185
186 size = (ram->size >> 12) - rsvd_head - rsvd_tail;
187 switch (device->chipset) {
188 case 0xaa:
189 case 0xac:
190 case 0xaf: /* IGPs, no reordering, no real VRAM */
191 ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
192 if (ret)
193 return ret;
194 388
195 ram->type = NV_MEM_TYPE_STOLEN; 389 switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
196 ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12; 390 case 0: ram->type = NV_MEM_TYPE_DDR1; break;
391 case 1:
392 if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
393 ram->type = NV_MEM_TYPE_DDR3;
394 else
395 ram->type = NV_MEM_TYPE_DDR2;
197 break; 396 break;
397 case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
398 case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
399 case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
198 default: 400 default:
199 switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
200 case 0: ram->type = NV_MEM_TYPE_DDR1; break;
201 case 1:
202 if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
203 ram->type = NV_MEM_TYPE_DDR3;
204 else
205 ram->type = NV_MEM_TYPE_DDR2;
206 break;
207 case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
208 case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
209 case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
210 default:
211 break;
212 }
213
214 ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
215 nv50_fb_vram_rblock(pfb, ram) >> 12);
216 if (ret)
217 return ret;
218
219 ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
220 ram->tags = nv_rd32(pfb, 0x100320);
221 break; 401 break;
222 } 402 }
223 403
404 ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
405 (rsvd_head + rsvd_tail),
406 nv50_fb_vram_rblock(pfb, ram) >> 12);
407 if (ret)
408 return ret;
409
410 ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
411 ram->tags = nv_rd32(pfb, 0x100320);
224 ram->get = nv50_ram_get; 412 ram->get = nv50_ram_get;
225 ram->put = nv50_ram_put; 413 ram->put = nv50_ram_put;
226 return 0; 414 return 0;
227} 415}
228 416
417static int
418nv50_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
419 struct nouveau_oclass *oclass, void *data, u32 datasize,
420 struct nouveau_object **pobject)
421{
422 struct nv50_ram *ram;
423 int ret, i;
424
425 ret = nv50_ram_create(parent, engine, oclass, &ram);
426 *pobject = nv_object(ram);
427 if (ret)
428 return ret;
429
430 switch (ram->base.type) {
431 case NV_MEM_TYPE_DDR2:
432 case NV_MEM_TYPE_GDDR3:
433 ram->base.calc = nv50_ram_calc;
434 ram->base.prog = nv50_ram_prog;
435 ram->base.tidy = nv50_ram_tidy;
436 break;
437 default:
438 nv_warn(ram, "reclocking of this ram type unsupported\n");
439 return 0;
440 }
441
442 ram->hwsq.r_0x002504 = hwsq_reg(0x002504);
443 ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040);
444 ram->hwsq.r_0x004008 = hwsq_reg(0x004008);
445 ram->hwsq.r_0x00400c = hwsq_reg(0x00400c);
446 ram->hwsq.r_0x100210 = hwsq_reg(0x100210);
447 ram->hwsq.r_0x1002d0 = hwsq_reg(0x1002d0);
448 ram->hwsq.r_0x1002d4 = hwsq_reg(0x1002d4);
449 ram->hwsq.r_0x1002dc = hwsq_reg(0x1002dc);
450 for (i = 0; i < 8; i++)
451 ram->hwsq.r_0x100da0[i] = hwsq_reg(0x100da0 + (i * 0x04));
452 ram->hwsq.r_0x100e20 = hwsq_reg(0x100e20);
453 ram->hwsq.r_0x100e24 = hwsq_reg(0x100e24);
454 ram->hwsq.r_0x611200 = hwsq_reg(0x611200);
455
456 for (i = 0; i < 9; i++)
457 ram->hwsq.r_timing[i] = hwsq_reg(0x100220 + (i * 0x04));
458
459 if (ram->base.ranks > 1) {
460 ram->hwsq.r_mr[0] = hwsq_reg2(0x1002c0, 0x1002c8);
461 ram->hwsq.r_mr[1] = hwsq_reg2(0x1002c4, 0x1002cc);
462 ram->hwsq.r_mr[2] = hwsq_reg2(0x1002e0, 0x1002e8);
463 ram->hwsq.r_mr[3] = hwsq_reg2(0x1002e4, 0x1002ec);
464 } else {
465 ram->hwsq.r_mr[0] = hwsq_reg(0x1002c0);
466 ram->hwsq.r_mr[1] = hwsq_reg(0x1002c4);
467 ram->hwsq.r_mr[2] = hwsq_reg(0x1002e0);
468 ram->hwsq.r_mr[3] = hwsq_reg(0x1002e4);
469 }
470
471 return 0;
472}
473
229struct nouveau_oclass 474struct nouveau_oclass
230nv50_ram_oclass = { 475nv50_ram_oclass = {
231 .handle = 0,
232 .ofuncs = &(struct nouveau_ofuncs) { 476 .ofuncs = &(struct nouveau_ofuncs) {
233 .ctor = nv50_ram_create, 477 .ctor = nv50_ram_ctor,
234 .dtor = _nouveau_ram_dtor, 478 .dtor = _nouveau_ram_dtor,
235 .init = _nouveau_ram_init, 479 .init = _nouveau_ram_init,
236 .fini = _nouveau_ram_fini, 480 .fini = _nouveau_ram_fini,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
new file mode 100644
index 000000000000..f6292cd9207c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -0,0 +1,447 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/pll.h>
28#include <subdev/bios/rammap.h>
29#include <subdev/bios/timing.h>
30
31#include <subdev/clock/nva3.h>
32#include <subdev/clock/pll.h>
33
34#include <core/option.h>
35
36#include "ramfuc.h"
37
38#include "nv50.h"
39
40struct nva3_ramfuc {
41 struct ramfuc base;
42 struct ramfuc_reg r_0x004000;
43 struct ramfuc_reg r_0x004004;
44 struct ramfuc_reg r_0x004018;
45 struct ramfuc_reg r_0x004128;
46 struct ramfuc_reg r_0x004168;
47 struct ramfuc_reg r_0x100200;
48 struct ramfuc_reg r_0x100210;
49 struct ramfuc_reg r_0x100220[9];
50 struct ramfuc_reg r_0x1002d0;
51 struct ramfuc_reg r_0x1002d4;
52 struct ramfuc_reg r_0x1002dc;
53 struct ramfuc_reg r_0x10053c;
54 struct ramfuc_reg r_0x1005a0;
55 struct ramfuc_reg r_0x1005a4;
56 struct ramfuc_reg r_0x100714;
57 struct ramfuc_reg r_0x100718;
58 struct ramfuc_reg r_0x10071c;
59 struct ramfuc_reg r_0x100760;
60 struct ramfuc_reg r_0x1007a0;
61 struct ramfuc_reg r_0x1007e0;
62 struct ramfuc_reg r_0x10f804;
63 struct ramfuc_reg r_0x1110e0;
64 struct ramfuc_reg r_0x111100;
65 struct ramfuc_reg r_0x111104;
66 struct ramfuc_reg r_0x611200;
67 struct ramfuc_reg r_mr[4];
68};
69
70struct nva3_ram {
71 struct nouveau_ram base;
72 struct nva3_ramfuc fuc;
73};
74
75static int
76nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
77{
78 struct nouveau_bios *bios = nouveau_bios(pfb);
79 struct nva3_ram *ram = (void *)pfb->ram;
80 struct nva3_ramfuc *fuc = &ram->fuc;
81 struct nva3_clock_info mclk;
82 struct bit_entry M;
83 u8 ver, cnt, strap;
84 u32 data;
85 struct {
86 u32 data;
87 u8 size;
88 } rammap, ramcfg, timing;
89 u32 r004018, r100760, ctrl;
90 u32 unk714, unk718, unk71c;
91 int ret;
92
93 /* lookup memory config data relevant to the target frequency */
94 rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
95 &cnt, &ramcfg.size);
96 if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
97 nv_error(pfb, "invalid/missing rammap entry\n");
98 return -EINVAL;
99 }
100
101 /* locate specific data set for the attached memory */
102 if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
103 nv_error(pfb, "invalid/missing memory table\n");
104 return -EINVAL;
105 }
106
107 strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
108 data = nv_ro16(bios, M.offset + 1);
109 if (data)
110 strap = nv_ro08(bios, data + strap);
111
112 if (strap >= cnt) {
113 nv_error(pfb, "invalid ramcfg strap\n");
114 return -EINVAL;
115 }
116
117 ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
118 if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
119 nv_error(pfb, "invalid/missing ramcfg entry\n");
120 return -EINVAL;
121 }
122
123 /* lookup memory timings, if bios says they're present */
124 strap = nv_ro08(bios, ramcfg.data + 0x01);
125 if (strap != 0xff) {
126 timing.data = nvbios_timing_entry(bios, strap, &ver,
127 &timing.size);
128 if (!timing.data || ver != 0x10 || timing.size < 0x19) {
129 nv_error(pfb, "invalid/missing timing entry\n");
130 return -EINVAL;
131 }
132 } else {
133 timing.data = 0;
134 }
135
136 ret = nva3_clock_info(nouveau_clock(pfb), 0x12, 0x4000, freq, &mclk);
137 if (ret < 0) {
138 nv_error(pfb, "failed mclk calculation\n");
139 return ret;
140 }
141
142 ret = ram_init(fuc, pfb);
143 if (ret)
144 return ret;
145
146 /* XXX: where the fuck does 750MHz come from? */
147 if (freq <= 750000) {
148 r004018 = 0x10000000;
149 r100760 = 0x22222222;
150 } else {
151 r004018 = 0x00000000;
152 r100760 = 0x00000000;
153 }
154
155 ctrl = ram_rd32(fuc, 0x004000);
156 if (ctrl & 0x00000008) {
157 if (mclk.pll) {
158 ram_mask(fuc, 0x004128, 0x00000101, 0x00000101);
159 ram_wr32(fuc, 0x004004, mclk.pll);
160 ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
161 ram_wr32(fuc, 0x004000, (ctrl &= 0xffffffef));
162 ram_wait(fuc, 0x004000, 0x00020000, 0x00020000, 64000);
163 ram_wr32(fuc, 0x004000, (ctrl |= 0x00000010));
164 ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
165 ram_wr32(fuc, 0x004000, (ctrl |= 0x00000004));
166 }
167 } else {
168 u32 ssel = 0x00000101;
169 if (mclk.clk)
170 ssel |= mclk.clk;
171 else
172 ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
173 ram_mask(fuc, 0x004168, 0x003f3141, ctrl);
174 }
175
176 if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)) {
177 ram_mask(fuc, 0x111104, 0x00000600, 0x00000000);
178 } else {
179 ram_mask(fuc, 0x111100, 0x40000000, 0x40000000);
180 ram_mask(fuc, 0x111104, 0x00000180, 0x00000000);
181 }
182
183 if (!(nv_ro08(bios, rammap.data + 0x04) & 0x02))
184 ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
185 ram_wr32(fuc, 0x611200, 0x00003300);
186 if (!(nv_ro08(bios, ramcfg.data + 0x02) & 0x10))
187 ram_wr32(fuc, 0x111100, 0x4c020000); /*XXX*/
188
189 ram_wr32(fuc, 0x1002d4, 0x00000001);
190 ram_wr32(fuc, 0x1002d0, 0x00000001);
191 ram_wr32(fuc, 0x1002d0, 0x00000001);
192 ram_wr32(fuc, 0x100210, 0x00000000);
193 ram_wr32(fuc, 0x1002dc, 0x00000001);
194 ram_nsec(fuc, 2000);
195
196 ctrl = ram_rd32(fuc, 0x004000);
197 if (!(ctrl & 0x00000008) && mclk.pll) {
198 ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
199 ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
200 ram_wr32(fuc, 0x004018, 0x00001000);
201 ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000001));
202 ram_wr32(fuc, 0x004004, mclk.pll);
203 ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
204 udelay(64);
205 ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
206 udelay(20);
207 } else
208 if (!mclk.pll) {
209 ram_mask(fuc, 0x004168, 0x003f3040, mclk.clk);
210 ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
211 ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
212 ram_wr32(fuc, 0x004018, 0x0000d000 | r004018);
213 }
214
215 if ( (nv_ro08(bios, rammap.data + 0x04) & 0x08)) {
216 u32 unk5a0 = (nv_ro16(bios, ramcfg.data + 0x05) << 8) |
217 nv_ro08(bios, ramcfg.data + 0x05);
218 u32 unk5a4 = (nv_ro16(bios, ramcfg.data + 0x07));
219 u32 unk804 = (nv_ro08(bios, ramcfg.data + 0x09) & 0xf0) << 16 |
220 (nv_ro08(bios, ramcfg.data + 0x03) & 0x0f) << 16 |
221 (nv_ro08(bios, ramcfg.data + 0x09) & 0x0f) |
222 0x80000000;
223 ram_wr32(fuc, 0x1005a0, unk5a0);
224 ram_wr32(fuc, 0x1005a4, unk5a4);
225 ram_wr32(fuc, 0x10f804, unk804);
226 ram_mask(fuc, 0x10053c, 0x00001000, 0x00000000);
227 } else {
228 ram_mask(fuc, 0x10053c, 0x00001000, 0x00001000);
229 ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
230 ram_mask(fuc, 0x100760, 0x22222222, r100760);
231 ram_mask(fuc, 0x1007a0, 0x22222222, r100760);
232 ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
233 }
234
235 if (mclk.pll) {
236 ram_mask(fuc, 0x1110e0, 0x00088000, 0x00011000);
237 ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000008));
238 }
239
240 /*XXX: LEAVE */
241 ram_wr32(fuc, 0x1002dc, 0x00000000);
242 ram_wr32(fuc, 0x1002d4, 0x00000001);
243 ram_wr32(fuc, 0x100210, 0x80000000);
244 ram_nsec(fuc, 1000);
245 ram_nsec(fuc, 1000);
246
247 ram_mask(fuc, mr[2], 0x00000000, 0x00000000);
248 ram_nsec(fuc, 1000);
249 ram_nuke(fuc, mr[0]);
250 ram_mask(fuc, mr[0], 0x00000000, 0x00000000);
251 ram_nsec(fuc, 1000);
252
253 ram_mask(fuc, 0x100220[3], 0x00000000, 0x00000000);
254 ram_mask(fuc, 0x100220[1], 0x00000000, 0x00000000);
255 ram_mask(fuc, 0x100220[6], 0x00000000, 0x00000000);
256 ram_mask(fuc, 0x100220[7], 0x00000000, 0x00000000);
257 ram_mask(fuc, 0x100220[2], 0x00000000, 0x00000000);
258 ram_mask(fuc, 0x100220[4], 0x00000000, 0x00000000);
259 ram_mask(fuc, 0x100220[5], 0x00000000, 0x00000000);
260 ram_mask(fuc, 0x100220[0], 0x00000000, 0x00000000);
261 ram_mask(fuc, 0x100220[8], 0x00000000, 0x00000000);
262
263 data = (nv_ro08(bios, ramcfg.data + 0x02) & 0x08) ? 0x00000000 : 0x00001000;
264 ram_mask(fuc, 0x100200, 0x00001000, data);
265
266 unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000010;
267 unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100;
268 unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
269 if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x20))
270 unk714 |= 0xf0000000;
271 if (!(nv_ro08(bios, ramcfg.data + 0x02) & 0x04))
272 unk714 |= 0x00000010;
273 ram_wr32(fuc, 0x100714, unk714);
274
275 if (nv_ro08(bios, ramcfg.data + 0x02) & 0x01)
276 unk71c |= 0x00000100;
277 ram_wr32(fuc, 0x10071c, unk71c);
278
279 if (nv_ro08(bios, ramcfg.data + 0x02) & 0x02)
280 unk718 |= 0x00000100;
281 ram_wr32(fuc, 0x100718, unk718);
282
283 if (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)
284 ram_wr32(fuc, 0x111100, 0x48000000); /*XXX*/
285
286 ram_mask(fuc, mr[0], 0x100, 0x100);
287 ram_nsec(fuc, 1000);
288 ram_mask(fuc, mr[0], 0x100, 0x000);
289 ram_nsec(fuc, 1000);
290
291 ram_nsec(fuc, 2000);
292 ram_nsec(fuc, 12000);
293
294 ram_wr32(fuc, 0x611200, 0x00003330);
295 if ( (nv_ro08(bios, rammap.data + 0x04) & 0x02))
296 ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
297 if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)) {
298 ram_mask(fuc, 0x111104, 0x00000180, 0x00000180);
299 ram_mask(fuc, 0x111100, 0x40000000, 0x00000000);
300 } else {
301 ram_mask(fuc, 0x111104, 0x00000600, 0x00000600);
302 }
303
304 if (mclk.pll) {
305 ram_mask(fuc, 0x004168, 0x00000001, 0x00000000);
306 ram_mask(fuc, 0x004168, 0x00000100, 0x00000000);
307 } else {
308 ram_mask(fuc, 0x004000, 0x00000001, 0x00000000);
309 ram_mask(fuc, 0x004128, 0x00000001, 0x00000000);
310 ram_mask(fuc, 0x004128, 0x00000100, 0x00000000);
311 }
312
313 return 0;
314}
315
316static int
317nva3_ram_prog(struct nouveau_fb *pfb)
318{
319 struct nouveau_device *device = nv_device(pfb);
320 struct nva3_ram *ram = (void *)pfb->ram;
321 struct nva3_ramfuc *fuc = &ram->fuc;
322 ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
323 return 0;
324}
325
326static void
327nva3_ram_tidy(struct nouveau_fb *pfb)
328{
329 struct nva3_ram *ram = (void *)pfb->ram;
330 struct nva3_ramfuc *fuc = &ram->fuc;
331 ram_exec(fuc, false);
332}
333
334static int
335nva3_ram_init(struct nouveau_object *object)
336{
337 struct nouveau_fb *pfb = (void *)object->parent;
338 struct nva3_ram *ram = (void *)object;
339 int ret, i;
340
341 ret = nouveau_ram_init(&ram->base);
342 if (ret)
343 return ret;
344
345 /* prepare for ddr link training, and load training patterns */
346 switch (ram->base.type) {
347 case NV_MEM_TYPE_DDR3: {
348 static const u32 pattern[16] = {
349 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
350 0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
351 0x33333333, 0x55555555, 0x77777777, 0x66666666,
352 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
353 };
354
355 nv_wr32(pfb, 0x100538, 0x10001ff6); /*XXX*/
356 nv_wr32(pfb, 0x1005a8, 0x0000ffff);
357 nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
358 for (i = 0; i < 0x30; i++) {
359 nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
360 nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
361 nv_wr32(pfb, 0x10f900, pattern[i % 16]);
362 nv_wr32(pfb, 0x10f920, pattern[i % 16]);
363 }
364 }
365 break;
366 default:
367 break;
368 }
369
370 return 0;
371}
372
373static int
374nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
375 struct nouveau_oclass *oclass, void *data, u32 datasize,
376 struct nouveau_object **pobject)
377{
378 struct nva3_ram *ram;
379 int ret, i;
380
381 ret = nv50_ram_create(parent, engine, oclass, &ram);
382 *pobject = nv_object(ram);
383 if (ret)
384 return ret;
385
386 switch (ram->base.type) {
387 case NV_MEM_TYPE_DDR3:
388 ram->base.calc = nva3_ram_calc;
389 ram->base.prog = nva3_ram_prog;
390 ram->base.tidy = nva3_ram_tidy;
391 break;
392 default:
393 nv_warn(ram, "reclocking of this ram type unsupported\n");
394 return 0;
395 }
396
397 ram->fuc.r_0x004000 = ramfuc_reg(0x004000);
398 ram->fuc.r_0x004004 = ramfuc_reg(0x004004);
399 ram->fuc.r_0x004018 = ramfuc_reg(0x004018);
400 ram->fuc.r_0x004128 = ramfuc_reg(0x004128);
401 ram->fuc.r_0x004168 = ramfuc_reg(0x004168);
402 ram->fuc.r_0x100200 = ramfuc_reg(0x100200);
403 ram->fuc.r_0x100210 = ramfuc_reg(0x100210);
404 for (i = 0; i < 9; i++)
405 ram->fuc.r_0x100220[i] = ramfuc_reg(0x100220 + (i * 4));
406 ram->fuc.r_0x1002d0 = ramfuc_reg(0x1002d0);
407 ram->fuc.r_0x1002d4 = ramfuc_reg(0x1002d4);
408 ram->fuc.r_0x1002dc = ramfuc_reg(0x1002dc);
409 ram->fuc.r_0x10053c = ramfuc_reg(0x10053c);
410 ram->fuc.r_0x1005a0 = ramfuc_reg(0x1005a0);
411 ram->fuc.r_0x1005a4 = ramfuc_reg(0x1005a4);
412 ram->fuc.r_0x100714 = ramfuc_reg(0x100714);
413 ram->fuc.r_0x100718 = ramfuc_reg(0x100718);
414 ram->fuc.r_0x10071c = ramfuc_reg(0x10071c);
415 ram->fuc.r_0x100760 = ramfuc_reg(0x100760);
416 ram->fuc.r_0x1007a0 = ramfuc_reg(0x1007a0);
417 ram->fuc.r_0x1007e0 = ramfuc_reg(0x1007e0);
418 ram->fuc.r_0x10f804 = ramfuc_reg(0x10f804);
419 ram->fuc.r_0x1110e0 = ramfuc_reg(0x1110e0);
420 ram->fuc.r_0x111100 = ramfuc_reg(0x111100);
421 ram->fuc.r_0x111104 = ramfuc_reg(0x111104);
422 ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
423
424 if (ram->base.ranks > 1) {
425 ram->fuc.r_mr[0] = ramfuc_reg2(0x1002c0, 0x1002c8);
426 ram->fuc.r_mr[1] = ramfuc_reg2(0x1002c4, 0x1002cc);
427 ram->fuc.r_mr[2] = ramfuc_reg2(0x1002e0, 0x1002e8);
428 ram->fuc.r_mr[3] = ramfuc_reg2(0x1002e4, 0x1002ec);
429 } else {
430 ram->fuc.r_mr[0] = ramfuc_reg(0x1002c0);
431 ram->fuc.r_mr[1] = ramfuc_reg(0x1002c4);
432 ram->fuc.r_mr[2] = ramfuc_reg(0x1002e0);
433 ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
434 }
435
436 return 0;
437}
438
439struct nouveau_oclass
440nva3_ram_oclass = {
441 .ofuncs = &(struct nouveau_ofuncs) {
442 .ctor = nva3_ram_ctor,
443 .dtor = _nouveau_ram_dtor,
444 .init = nva3_ram_init,
445 .fini = _nouveau_ram_fini,
446 },
447};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
new file mode 100644
index 000000000000..00f2ca7e44a5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv50.h"
26
27static int
28nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
29 struct nouveau_oclass *oclass, void *data, u32 datasize,
30 struct nouveau_object **pobject)
31{
32 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
33 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
34 struct nouveau_fb *pfb = nouveau_fb(parent);
35 struct nouveau_ram *ram;
36 int ret;
37
38 ret = nouveau_ram_create(parent, engine, oclass, &ram);
39 *pobject = nv_object(ram);
40 if (ret)
41 return ret;
42
43 ram->size = nv_rd32(pfb, 0x10020c);
44 ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
45
46 ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
47 (rsvd_head + rsvd_tail), 1);
48 if (ret)
49 return ret;
50
51 ram->type = NV_MEM_TYPE_STOLEN;
52 ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
53 ram->get = nv50_ram_get;
54 ram->put = nv50_ram_put;
55 return 0;
56}
57
58struct nouveau_oclass
59nvaa_ram_oclass = {
60 .ofuncs = &(struct nouveau_ofuncs) {
61 .ctor = nvaa_ram_ctor,
62 .dtor = _nouveau_ram_dtor,
63 .init = _nouveau_ram_init,
64 .fini = _nouveau_ram_fini,
65 },
66};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index cf97c4de4a6b..f464547c6bab 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -23,9 +23,414 @@
23 */ 23 */
24 24
25#include <subdev/bios.h> 25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/pll.h>
28#include <subdev/bios/rammap.h>
29#include <subdev/bios/timing.h>
26#include <subdev/ltcg.h> 30#include <subdev/ltcg.h>
27 31
28#include "priv.h" 32#include <subdev/clock.h>
33#include <subdev/clock/pll.h>
34
35#include <core/option.h>
36
37#include "ramfuc.h"
38
39#include "nvc0.h"
40
41struct nvc0_ramfuc {
42 struct ramfuc base;
43
44 struct ramfuc_reg r_0x10fe20;
45 struct ramfuc_reg r_0x10fe24;
46 struct ramfuc_reg r_0x137320;
47 struct ramfuc_reg r_0x137330;
48
49 struct ramfuc_reg r_0x132000;
50 struct ramfuc_reg r_0x132004;
51 struct ramfuc_reg r_0x132100;
52
53 struct ramfuc_reg r_0x137390;
54
55 struct ramfuc_reg r_0x10f290;
56 struct ramfuc_reg r_0x10f294;
57 struct ramfuc_reg r_0x10f298;
58 struct ramfuc_reg r_0x10f29c;
59 struct ramfuc_reg r_0x10f2a0;
60
61 struct ramfuc_reg r_0x10f300;
62 struct ramfuc_reg r_0x10f338;
63 struct ramfuc_reg r_0x10f340;
64 struct ramfuc_reg r_0x10f344;
65 struct ramfuc_reg r_0x10f348;
66
67 struct ramfuc_reg r_0x10f910;
68 struct ramfuc_reg r_0x10f914;
69
70 struct ramfuc_reg r_0x100b0c;
71 struct ramfuc_reg r_0x10f050;
72 struct ramfuc_reg r_0x10f090;
73 struct ramfuc_reg r_0x10f200;
74 struct ramfuc_reg r_0x10f210;
75 struct ramfuc_reg r_0x10f310;
76 struct ramfuc_reg r_0x10f314;
77 struct ramfuc_reg r_0x10f610;
78 struct ramfuc_reg r_0x10f614;
79 struct ramfuc_reg r_0x10f800;
80 struct ramfuc_reg r_0x10f808;
81 struct ramfuc_reg r_0x10f824;
82 struct ramfuc_reg r_0x10f830;
83 struct ramfuc_reg r_0x10f988;
84 struct ramfuc_reg r_0x10f98c;
85 struct ramfuc_reg r_0x10f990;
86 struct ramfuc_reg r_0x10f998;
87 struct ramfuc_reg r_0x10f9b0;
88 struct ramfuc_reg r_0x10f9b4;
89 struct ramfuc_reg r_0x10fb04;
90 struct ramfuc_reg r_0x10fb08;
91 struct ramfuc_reg r_0x137300;
92 struct ramfuc_reg r_0x137310;
93 struct ramfuc_reg r_0x137360;
94 struct ramfuc_reg r_0x1373ec;
95 struct ramfuc_reg r_0x1373f0;
96 struct ramfuc_reg r_0x1373f8;
97
98 struct ramfuc_reg r_0x61c140;
99 struct ramfuc_reg r_0x611200;
100
101 struct ramfuc_reg r_0x13d8f4;
102};
103
104struct nvc0_ram {
105 struct nouveau_ram base;
106 struct nvc0_ramfuc fuc;
107 struct nvbios_pll refpll;
108 struct nvbios_pll mempll;
109};
110
111static void
112nvc0_ram_train(struct nvc0_ramfuc *fuc, u32 magic)
113{
114 struct nvc0_ram *ram = container_of(fuc, typeof(*ram), fuc);
115 struct nouveau_fb *pfb = nouveau_fb(ram);
116 u32 part = nv_rd32(pfb, 0x022438), i;
117 u32 mask = nv_rd32(pfb, 0x022554);
118 u32 addr = 0x110974;
119
120 ram_wr32(fuc, 0x10f910, magic);
121 ram_wr32(fuc, 0x10f914, magic);
122
123 for (i = 0; (magic & 0x80000000) && i < part; addr += 0x1000, i++) {
124 if (mask & (1 << i))
125 continue;
126 ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
127 }
128}
129
130static int
131nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
132{
133 struct nouveau_clock *clk = nouveau_clock(pfb);
134 struct nouveau_bios *bios = nouveau_bios(pfb);
135 struct nvc0_ram *ram = (void *)pfb->ram;
136 struct nvc0_ramfuc *fuc = &ram->fuc;
137 struct bit_entry M;
138 u8 ver, cnt, strap;
139 u32 data;
140 struct {
141 u32 data;
142 u8 size;
143 } rammap, ramcfg, timing;
144 int ref, div, out;
145 int from, mode;
146 int N1, M1, P;
147 int ret;
148
149 /* lookup memory config data relevant to the target frequency */
150 rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
151 &cnt, &ramcfg.size);
152 if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
153 nv_error(pfb, "invalid/missing rammap entry\n");
154 return -EINVAL;
155 }
156
157 /* locate specific data set for the attached memory */
158 if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
159 nv_error(pfb, "invalid/missing memory table\n");
160 return -EINVAL;
161 }
162
163 strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
164 data = nv_ro16(bios, M.offset + 1);
165 if (data)
166 strap = nv_ro08(bios, data + strap);
167
168 if (strap >= cnt) {
169 nv_error(pfb, "invalid ramcfg strap\n");
170 return -EINVAL;
171 }
172
173 ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
174 if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
175 nv_error(pfb, "invalid/missing ramcfg entry\n");
176 return -EINVAL;
177 }
178
179 /* lookup memory timings, if bios says they're present */
180 strap = nv_ro08(bios, ramcfg.data + 0x01);
181 if (strap != 0xff) {
182 timing.data = nvbios_timing_entry(bios, strap, &ver,
183 &timing.size);
184 if (!timing.data || ver != 0x10 || timing.size < 0x19) {
185 nv_error(pfb, "invalid/missing timing entry\n");
186 return -EINVAL;
187 }
188 } else {
189 timing.data = 0;
190 }
191
192 ret = ram_init(fuc, pfb);
193 if (ret)
194 return ret;
195
196 /* determine current mclk configuration */
197 from = !!(ram_rd32(fuc, 0x1373f0) & 0x00000002); /*XXX: ok? */
198
199 /* determine target mclk configuration */
200 if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
201 ref = clk->read(clk, nv_clk_src_sppll0);
202 else
203 ref = clk->read(clk, nv_clk_src_sppll1);
204 div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
205 out = (ref * 2) / (div + 2);
206 mode = freq != out;
207
208 ram_mask(fuc, 0x137360, 0x00000002, 0x00000000);
209
210 if ((ram_rd32(fuc, 0x132000) & 0x00000002) || 0 /*XXX*/) {
211 ram_nuke(fuc, 0x132000);
212 ram_mask(fuc, 0x132000, 0x00000002, 0x00000002);
213 ram_mask(fuc, 0x132000, 0x00000002, 0x00000000);
214 }
215
216 if (mode == 1) {
217 ram_nuke(fuc, 0x10fe20);
218 ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000002);
219 ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000000);
220 }
221
222// 0x00020034 // 0x0000000a
223 ram_wr32(fuc, 0x132100, 0x00000001);
224
225 if (mode == 1 && from == 0) {
226 /* calculate refpll */
227 ret = nva3_pll_calc(nv_subdev(pfb), &ram->refpll,
228 ram->mempll.refclk, &N1, NULL, &M1, &P);
229 if (ret <= 0) {
230 nv_error(pfb, "unable to calc refpll\n");
231 return ret ? ret : -ERANGE;
232 }
233
234 ram_wr32(fuc, 0x10fe20, 0x20010000);
235 ram_wr32(fuc, 0x137320, 0x00000003);
236 ram_wr32(fuc, 0x137330, 0x81200006);
237 ram_wr32(fuc, 0x10fe24, (P << 16) | (N1 << 8) | M1);
238 ram_wr32(fuc, 0x10fe20, 0x20010001);
239 ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
240
241 /* calculate mempll */
242 ret = nva3_pll_calc(nv_subdev(pfb), &ram->mempll, freq,
243 &N1, NULL, &M1, &P);
244 if (ret <= 0) {
245			nv_error(pfb, "unable to calc mempll\n");
246 return ret ? ret : -ERANGE;
247 }
248
249 ram_wr32(fuc, 0x10fe20, 0x20010005);
250 ram_wr32(fuc, 0x132004, (P << 16) | (N1 << 8) | M1);
251 ram_wr32(fuc, 0x132000, 0x18010101);
252 ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
253 } else
254 if (mode == 0) {
255 ram_wr32(fuc, 0x137300, 0x00000003);
256 }
257
258 if (from == 0) {
259 ram_nuke(fuc, 0x10fb04);
260 ram_mask(fuc, 0x10fb04, 0x0000ffff, 0x00000000);
261 ram_nuke(fuc, 0x10fb08);
262 ram_mask(fuc, 0x10fb08, 0x0000ffff, 0x00000000);
263 ram_wr32(fuc, 0x10f988, 0x2004ff00);
264 ram_wr32(fuc, 0x10f98c, 0x003fc040);
265 ram_wr32(fuc, 0x10f990, 0x20012001);
266 ram_wr32(fuc, 0x10f998, 0x00011a00);
267 ram_wr32(fuc, 0x13d8f4, 0x00000000);
268 } else {
269 ram_wr32(fuc, 0x10f988, 0x20010000);
270 ram_wr32(fuc, 0x10f98c, 0x00000000);
271 ram_wr32(fuc, 0x10f990, 0x20012001);
272 ram_wr32(fuc, 0x10f998, 0x00010a00);
273 }
274
275 if (from == 0) {
276// 0x00020039 // 0x000000ba
277 }
278
279// 0x0002003a // 0x00000002
280 ram_wr32(fuc, 0x100b0c, 0x00080012);
281// 0x00030014 // 0x00000000 // 0x02b5f070
282// 0x00030014 // 0x00010000 // 0x02b5f070
283 ram_wr32(fuc, 0x611200, 0x00003300);
284// 0x00020034 // 0x0000000a
285// 0x00030020 // 0x00000001 // 0x00000000
286
287 ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
288 ram_wr32(fuc, 0x10f210, 0x00000000);
289 ram_nsec(fuc, 1000);
290 if (mode == 0)
291 nvc0_ram_train(fuc, 0x000c1001);
292 ram_wr32(fuc, 0x10f310, 0x00000001);
293 ram_nsec(fuc, 1000);
294 ram_wr32(fuc, 0x10f090, 0x00000061);
295 ram_wr32(fuc, 0x10f090, 0xc000007f);
296 ram_nsec(fuc, 1000);
297
298 if (from == 0) {
299 ram_wr32(fuc, 0x10f824, 0x00007fd4);
300 } else {
301 ram_wr32(fuc, 0x1373ec, 0x00020404);
302 }
303
304 if (mode == 0) {
305 ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
306 ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
307 ram_wr32(fuc, 0x10f830, 0x41500010);
308 ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
309 ram_mask(fuc, 0x132100, 0x00000100, 0x00000100);
310 ram_wr32(fuc, 0x10f050, 0xff000090);
311 ram_wr32(fuc, 0x1373ec, 0x00020f0f);
312 ram_wr32(fuc, 0x1373f0, 0x00000003);
313 ram_wr32(fuc, 0x137310, 0x81201616);
314 ram_wr32(fuc, 0x132100, 0x00000001);
315// 0x00020039 // 0x000000ba
316 ram_wr32(fuc, 0x10f830, 0x00300017);
317 ram_wr32(fuc, 0x1373f0, 0x00000001);
318 ram_wr32(fuc, 0x10f824, 0x00007e77);
319 ram_wr32(fuc, 0x132000, 0x18030001);
320 ram_wr32(fuc, 0x10f090, 0x4000007e);
321 ram_nsec(fuc, 2000);
322 ram_wr32(fuc, 0x10f314, 0x00000001);
323 ram_wr32(fuc, 0x10f210, 0x80000000);
324 ram_wr32(fuc, 0x10f338, 0x00300220);
325 ram_wr32(fuc, 0x10f300, 0x0000011d);
326 ram_nsec(fuc, 1000);
327 ram_wr32(fuc, 0x10f290, 0x02060505);
328 ram_wr32(fuc, 0x10f294, 0x34208288);
329 ram_wr32(fuc, 0x10f298, 0x44050411);
330 ram_wr32(fuc, 0x10f29c, 0x0000114c);
331 ram_wr32(fuc, 0x10f2a0, 0x42e10069);
332 ram_wr32(fuc, 0x10f614, 0x40044f77);
333 ram_wr32(fuc, 0x10f610, 0x40044f77);
334 ram_wr32(fuc, 0x10f344, 0x00600009);
335 ram_nsec(fuc, 1000);
336 ram_wr32(fuc, 0x10f348, 0x00700008);
337 ram_wr32(fuc, 0x61c140, 0x19240000);
338 ram_wr32(fuc, 0x10f830, 0x00300017);
339 nvc0_ram_train(fuc, 0x80021001);
340 nvc0_ram_train(fuc, 0x80081001);
341 ram_wr32(fuc, 0x10f340, 0x00500004);
342 ram_nsec(fuc, 1000);
343 ram_wr32(fuc, 0x10f830, 0x01300017);
344 ram_wr32(fuc, 0x10f830, 0x00300017);
345// 0x00030020 // 0x00000000 // 0x00000000
346// 0x00020034 // 0x0000000b
347 ram_wr32(fuc, 0x100b0c, 0x00080028);
348 ram_wr32(fuc, 0x611200, 0x00003330);
349 } else {
350 ram_wr32(fuc, 0x10f800, 0x00001800);
351 ram_wr32(fuc, 0x13d8f4, 0x00000000);
352 ram_wr32(fuc, 0x1373ec, 0x00020404);
353 ram_wr32(fuc, 0x1373f0, 0x00000003);
354 ram_wr32(fuc, 0x10f830, 0x40700010);
355 ram_wr32(fuc, 0x10f830, 0x40500010);
356 ram_wr32(fuc, 0x13d8f4, 0x00000000);
357 ram_wr32(fuc, 0x1373f8, 0x00000000);
358 ram_wr32(fuc, 0x132100, 0x00000101);
359 ram_wr32(fuc, 0x137310, 0x89201616);
360 ram_wr32(fuc, 0x10f050, 0xff000090);
361 ram_wr32(fuc, 0x1373ec, 0x00030404);
362 ram_wr32(fuc, 0x1373f0, 0x00000002);
363 // 0x00020039 // 0x00000011
364 ram_wr32(fuc, 0x132100, 0x00000001);
365 ram_wr32(fuc, 0x1373f8, 0x00002000);
366 ram_nsec(fuc, 2000);
367 ram_wr32(fuc, 0x10f808, 0x7aaa0050);
368 ram_wr32(fuc, 0x10f830, 0x00500010);
369 ram_wr32(fuc, 0x10f200, 0x00ce1000);
370 ram_wr32(fuc, 0x10f090, 0x4000007e);
371 ram_nsec(fuc, 2000);
372 ram_wr32(fuc, 0x10f314, 0x00000001);
373 ram_wr32(fuc, 0x10f210, 0x80000000);
374 ram_wr32(fuc, 0x10f338, 0x00300200);
375 ram_wr32(fuc, 0x10f300, 0x0000084d);
376 ram_nsec(fuc, 1000);
377 ram_wr32(fuc, 0x10f290, 0x0b343825);
378 ram_wr32(fuc, 0x10f294, 0x3483028e);
379 ram_wr32(fuc, 0x10f298, 0x440c0600);
380 ram_wr32(fuc, 0x10f29c, 0x0000214c);
381 ram_wr32(fuc, 0x10f2a0, 0x42e20069);
382 ram_wr32(fuc, 0x10f200, 0x00ce0000);
383 ram_wr32(fuc, 0x10f614, 0x60044e77);
384 ram_wr32(fuc, 0x10f610, 0x60044e77);
385 ram_wr32(fuc, 0x10f340, 0x00500000);
386 ram_nsec(fuc, 1000);
387 ram_wr32(fuc, 0x10f344, 0x00600228);
388 ram_nsec(fuc, 1000);
389 ram_wr32(fuc, 0x10f348, 0x00700000);
390 ram_wr32(fuc, 0x13d8f4, 0x00000000);
391 ram_wr32(fuc, 0x61c140, 0x09a40000);
392
393 nvc0_ram_train(fuc, 0x800e1008);
394
395 ram_nsec(fuc, 1000);
396 ram_wr32(fuc, 0x10f800, 0x00001804);
397 // 0x00030020 // 0x00000000 // 0x00000000
398 // 0x00020034 // 0x0000000b
399 ram_wr32(fuc, 0x13d8f4, 0x00000000);
400 ram_wr32(fuc, 0x100b0c, 0x00080028);
401 ram_wr32(fuc, 0x611200, 0x00003330);
402 ram_nsec(fuc, 100000);
403 ram_wr32(fuc, 0x10f9b0, 0x05313f41);
404 ram_wr32(fuc, 0x10f9b4, 0x00002f50);
405
406 nvc0_ram_train(fuc, 0x010c1001);
407 }
408
409 ram_mask(fuc, 0x10f200, 0x00000800, 0x00000800);
410// 0x00020016 // 0x00000000
411
412 if (mode == 0)
413 ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
414 return 0;
415}
416
417static int
418nvc0_ram_prog(struct nouveau_fb *pfb)
419{
420 struct nouveau_device *device = nv_device(pfb);
421 struct nvc0_ram *ram = (void *)pfb->ram;
422 struct nvc0_ramfuc *fuc = &ram->fuc;
423 ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
424 return 0;
425}
426
427static void
428nvc0_ram_tidy(struct nouveau_fb *pfb)
429{
430 struct nvc0_ram *ram = (void *)pfb->ram;
431 struct nvc0_ramfuc *fuc = &ram->fuc;
432 ram_exec(fuc, false);
433}
29 434
30extern const u8 nvc0_pte_storage_type_map[256]; 435extern const u8 nvc0_pte_storage_type_map[256];
31 436
@@ -110,10 +515,9 @@ nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
110 return 0; 515 return 0;
111} 516}
112 517
113static int 518int
114nvc0_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, 519nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
115 struct nouveau_oclass *oclass, void *data, u32 size, 520 struct nouveau_oclass *oclass, int size, void **pobject)
116 struct nouveau_object **pobject)
117{ 521{
118 struct nouveau_fb *pfb = nouveau_fb(parent); 522 struct nouveau_fb *pfb = nouveau_fb(parent);
119 struct nouveau_bios *bios = nouveau_bios(pfb); 523 struct nouveau_bios *bios = nouveau_bios(pfb);
@@ -127,8 +531,8 @@ nvc0_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
127 bool uniform = true; 531 bool uniform = true;
128 int ret, part; 532 int ret, part;
129 533
130 ret = nouveau_ram_create(parent, engine, oclass, &ram); 534 ret = nouveau_ram_create_(parent, engine, oclass, size, pobject);
131 *pobject = nv_object(ram); 535 ram = *pobject;
132 if (ret) 536 if (ret)
133 return ret; 537 return ret;
134 538
@@ -182,13 +586,158 @@ nvc0_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
182 return 0; 586 return 0;
183} 587}
184 588
589static int
590nvc0_ram_init(struct nouveau_object *object)
591{
592 struct nouveau_fb *pfb = (void *)object->parent;
593 struct nvc0_ram *ram = (void *)object;
594 int ret, i;
595
596 ret = nouveau_ram_init(&ram->base);
597 if (ret)
598 return ret;
599
600 /* prepare for ddr link training, and load training patterns */
601 switch (ram->base.type) {
602 case NV_MEM_TYPE_GDDR5: {
603 static const u8 train0[] = {
604 0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
605 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
606 };
607 static const u32 train1[] = {
608 0x00000000, 0xffffffff,
609 0x55555555, 0xaaaaaaaa,
610 0x33333333, 0xcccccccc,
611 0xf0f0f0f0, 0x0f0f0f0f,
612 0x00ff00ff, 0xff00ff00,
613 0x0000ffff, 0xffff0000,
614 };
615
616 for (i = 0; i < 0x30; i++) {
617 nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
618 nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
619 nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
620 nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
621 nv_wr32(pfb, 0x10f918, train1[i % 12]);
622 nv_wr32(pfb, 0x10f91c, train1[i % 12]);
623 nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
624 nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
625 nv_wr32(pfb, 0x10f918, train1[i % 12]);
626 nv_wr32(pfb, 0x10f91c, train1[i % 12]);
627 }
628 } break;
629 default:
630 break;
631 }
632
633 return 0;
634}
635
636static int
637nvc0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
638 struct nouveau_oclass *oclass, void *data, u32 size,
639 struct nouveau_object **pobject)
640{
641 struct nouveau_bios *bios = nouveau_bios(parent);
642 struct nvc0_ram *ram;
643 int ret;
644
645 ret = nvc0_ram_create(parent, engine, oclass, &ram);
646 *pobject = nv_object(ram);
647 if (ret)
648 return ret;
649
650 ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
651 if (ret) {
652 nv_error(ram, "mclk refpll data not found\n");
653 return ret;
654 }
655
656 ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
657 if (ret) {
658 nv_error(ram, "mclk pll data not found\n");
659 return ret;
660 }
661
662 switch (ram->base.type) {
663 case NV_MEM_TYPE_GDDR5:
664 ram->base.calc = nvc0_ram_calc;
665 ram->base.prog = nvc0_ram_prog;
666 ram->base.tidy = nvc0_ram_tidy;
667 break;
668 default:
669 nv_warn(ram, "reclocking of this ram type unsupported\n");
670 return 0;
671 }
672
673 ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
674 ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
675 ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
676 ram->fuc.r_0x137330 = ramfuc_reg(0x137330);
677
678 ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
679 ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
680 ram->fuc.r_0x132100 = ramfuc_reg(0x132100);
681
682 ram->fuc.r_0x137390 = ramfuc_reg(0x137390);
683
684 ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
685 ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
686 ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
687 ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
688 ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);
689
690 ram->fuc.r_0x10f300 = ramfuc_reg(0x10f300);
691 ram->fuc.r_0x10f338 = ramfuc_reg(0x10f338);
692 ram->fuc.r_0x10f340 = ramfuc_reg(0x10f340);
693 ram->fuc.r_0x10f344 = ramfuc_reg(0x10f344);
694 ram->fuc.r_0x10f348 = ramfuc_reg(0x10f348);
695
696 ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
697 ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
698
699 ram->fuc.r_0x100b0c = ramfuc_reg(0x100b0c);
700 ram->fuc.r_0x10f050 = ramfuc_reg(0x10f050);
701 ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
702 ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
703 ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
704 ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
705 ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
706 ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
707 ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
708 ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
709 ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
710 ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
711 ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
712 ram->fuc.r_0x10f988 = ramfuc_reg(0x10f988);
713 ram->fuc.r_0x10f98c = ramfuc_reg(0x10f98c);
714 ram->fuc.r_0x10f990 = ramfuc_reg(0x10f990);
715 ram->fuc.r_0x10f998 = ramfuc_reg(0x10f998);
716 ram->fuc.r_0x10f9b0 = ramfuc_reg(0x10f9b0);
717 ram->fuc.r_0x10f9b4 = ramfuc_reg(0x10f9b4);
718 ram->fuc.r_0x10fb04 = ramfuc_reg(0x10fb04);
719 ram->fuc.r_0x10fb08 = ramfuc_reg(0x10fb08);
720	ram->fuc.r_0x137300 = ramfuc_reg(0x137300);
721 ram->fuc.r_0x137310 = ramfuc_reg(0x137310);
722 ram->fuc.r_0x137360 = ramfuc_reg(0x137360);
723 ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
724 ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
725 ram->fuc.r_0x1373f8 = ramfuc_reg(0x1373f8);
726
727 ram->fuc.r_0x61c140 = ramfuc_reg(0x61c140);
728 ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
729
730 ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
731 return 0;
732}
733
185struct nouveau_oclass 734struct nouveau_oclass
186nvc0_ram_oclass = { 735nvc0_ram_oclass = {
187 .handle = 0, 736 .handle = 0,
188 .ofuncs = &(struct nouveau_ofuncs) { 737 .ofuncs = &(struct nouveau_ofuncs) {
189 .ctor = nvc0_ram_create, 738 .ctor = nvc0_ram_ctor,
190 .dtor = _nouveau_ram_dtor, 739 .dtor = _nouveau_ram_dtor,
191 .init = _nouveau_ram_init, 740 .init = nvc0_ram_init,
192 .fini = _nouveau_ram_fini, 741 .fini = _nouveau_ram_fini,
193 } 742 }
194}; 743};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
new file mode 100644
index 000000000000..bc86cfd084f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -0,0 +1,1264 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/gpio.h>
26
27#include <subdev/bios.h>
28#include <subdev/bios/bit.h>
29#include <subdev/bios/pll.h>
30#include <subdev/bios/init.h>
31#include <subdev/bios/rammap.h>
32#include <subdev/bios/timing.h>
33
34#include <subdev/clock.h>
35#include <subdev/clock/pll.h>
36
37#include <subdev/timer.h>
38
39#include <core/option.h>
40
41#include "nvc0.h"
42
43#include "ramfuc.h"
44
45struct nve0_ramfuc {
46 struct ramfuc base;
47
48 struct nvbios_pll refpll;
49 struct nvbios_pll mempll;
50
51 struct ramfuc_reg r_gpioMV;
52 u32 r_funcMV[2];
53 struct ramfuc_reg r_gpio2E;
54 u32 r_func2E[2];
55 struct ramfuc_reg r_gpiotrig;
56
57 struct ramfuc_reg r_0x132020;
58 struct ramfuc_reg r_0x132028;
59 struct ramfuc_reg r_0x132024;
60 struct ramfuc_reg r_0x132030;
61 struct ramfuc_reg r_0x132034;
62 struct ramfuc_reg r_0x132000;
63 struct ramfuc_reg r_0x132004;
64 struct ramfuc_reg r_0x132040;
65
66 struct ramfuc_reg r_0x10f248;
67 struct ramfuc_reg r_0x10f290;
68 struct ramfuc_reg r_0x10f294;
69 struct ramfuc_reg r_0x10f298;
70 struct ramfuc_reg r_0x10f29c;
71 struct ramfuc_reg r_0x10f2a0;
72 struct ramfuc_reg r_0x10f2a4;
73 struct ramfuc_reg r_0x10f2a8;
74 struct ramfuc_reg r_0x10f2ac;
75 struct ramfuc_reg r_0x10f2cc;
76 struct ramfuc_reg r_0x10f2e8;
77 struct ramfuc_reg r_0x10f250;
78 struct ramfuc_reg r_0x10f24c;
79 struct ramfuc_reg r_0x10fec4;
80 struct ramfuc_reg r_0x10fec8;
81 struct ramfuc_reg r_0x10f604;
82 struct ramfuc_reg r_0x10f614;
83 struct ramfuc_reg r_0x10f610;
84 struct ramfuc_reg r_0x100770;
85 struct ramfuc_reg r_0x100778;
86 struct ramfuc_reg r_0x10f224;
87
88 struct ramfuc_reg r_0x10f870;
89 struct ramfuc_reg r_0x10f698;
90 struct ramfuc_reg r_0x10f694;
91 struct ramfuc_reg r_0x10f6b8;
92 struct ramfuc_reg r_0x10f808;
93 struct ramfuc_reg r_0x10f670;
94 struct ramfuc_reg r_0x10f60c;
95 struct ramfuc_reg r_0x10f830;
96 struct ramfuc_reg r_0x1373ec;
97 struct ramfuc_reg r_0x10f800;
98 struct ramfuc_reg r_0x10f82c;
99
100 struct ramfuc_reg r_0x10f978;
101 struct ramfuc_reg r_0x10f910;
102 struct ramfuc_reg r_0x10f914;
103
104 struct ramfuc_reg r_mr[16]; /* MR0 - MR8, MR15 */
105
106 struct ramfuc_reg r_0x62c000;
107 struct ramfuc_reg r_0x10f200;
108 struct ramfuc_reg r_0x10f210;
109 struct ramfuc_reg r_0x10f310;
110 struct ramfuc_reg r_0x10f314;
111 struct ramfuc_reg r_0x10f318;
112 struct ramfuc_reg r_0x10f090;
113 struct ramfuc_reg r_0x10f69c;
114 struct ramfuc_reg r_0x10f824;
115 struct ramfuc_reg r_0x1373f0;
116 struct ramfuc_reg r_0x1373f4;
117 struct ramfuc_reg r_0x137320;
118 struct ramfuc_reg r_0x10f65c;
119 struct ramfuc_reg r_0x10f6bc;
120 struct ramfuc_reg r_0x100710;
121 struct ramfuc_reg r_0x10f750;
122};
123
124struct nve0_ram {
125 struct nouveau_ram base;
126 struct nve0_ramfuc fuc;
127 int from;
128 int mode;
129 int N1, fN1, M1, P1;
130 int N2, M2, P2;
131};
132
133/*******************************************************************************
134 * GDDR5
135 ******************************************************************************/
136static void
137train(struct nve0_ramfuc *fuc, u32 magic)
138{
139 struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
140 struct nouveau_fb *pfb = nouveau_fb(ram);
141 const int mc = nv_rd32(pfb, 0x02243c);
142 int i;
143
144 ram_mask(fuc, 0x10f910, 0xbc0e0000, magic);
145 ram_mask(fuc, 0x10f914, 0xbc0e0000, magic);
146 for (i = 0; i < mc; i++) {
147 const u32 addr = 0x110974 + (i * 0x1000);
148 ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
149 }
150}
151
152static void
153r1373f4_init(struct nve0_ramfuc *fuc)
154{
155 struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
156 const u32 mcoef = ((--ram->P2 << 28) | (ram->N2 << 8) | ram->M2);
157 const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
158 const u32 runk0 = ram->fN1 << 16;
159 const u32 runk1 = ram->fN1;
160
161 if (ram->from == 2) {
162 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00001100);
163 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00000010);
164 } else {
165 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
166 }
167
168 ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
169 ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
170
171 /* (re)program refpll, if required */
172 if ((ram_rd32(fuc, 0x132024) & 0xffffffff) != rcoef ||
173 (ram_rd32(fuc, 0x132034) & 0x0000ffff) != runk1) {
174 ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
175 ram_mask(fuc, 0x132020, 0x00000001, 0x00000000);
176 ram_wr32(fuc, 0x137320, 0x00000000);
177 ram_mask(fuc, 0x132030, 0xffff0000, runk0);
178 ram_mask(fuc, 0x132034, 0x0000ffff, runk1);
179 ram_wr32(fuc, 0x132024, rcoef);
180 ram_mask(fuc, 0x132028, 0x00080000, 0x00080000);
181 ram_mask(fuc, 0x132020, 0x00000001, 0x00000001);
182 ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
183 ram_mask(fuc, 0x132028, 0x00080000, 0x00000000);
184 }
185
186 /* (re)program mempll, if required */
187 if (ram->mode == 2) {
188 ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
189 ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
190 ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
191 ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
192 ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
193 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00001100);
194 } else {
195 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010100);
196 }
197
198 ram_mask(fuc, 0x1373f4, 0x00000000, 0x00000010);
199}
200
201static void
202r1373f4_fini(struct nve0_ramfuc *fuc, u32 ramcfg)
203{
204 struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
205 struct nouveau_bios *bios = nouveau_bios(ram);
206 u8 v0 = (nv_ro08(bios, ramcfg + 0x03) & 0xc0) >> 6;
207 u8 v1 = (nv_ro08(bios, ramcfg + 0x03) & 0x30) >> 4;
208 u32 tmp;
209
210 tmp = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
211 ram_wr32(fuc, 0x1373ec, tmp | (v1 << 16));
212 ram_mask(fuc, 0x1373f0, (~ram->mode & 3), 0x00000000);
213 if (ram->mode == 2) {
214 ram_mask(fuc, 0x1373f4, 0x00000003, 0x000000002);
215 ram_mask(fuc, 0x1373f4, 0x00001100, 0x000000000);
216 } else {
217 ram_mask(fuc, 0x1373f4, 0x00000003, 0x000000001);
218 ram_mask(fuc, 0x1373f4, 0x00010000, 0x000000000);
219 }
220 ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4);
221}
222
223static int
224nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
225{
226 struct nouveau_bios *bios = nouveau_bios(pfb);
227 struct nve0_ram *ram = (void *)pfb->ram;
228 struct nve0_ramfuc *fuc = &ram->fuc;
229 const u32 rammap = ram->base.rammap.data;
230 const u32 ramcfg = ram->base.ramcfg.data;
231 const u32 timing = ram->base.timing.data;
232 int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
233 int mv = 1; /*XXX*/
234 u32 mask, data;
235
236 ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
237 ram_wr32(fuc, 0x62c000, 0x0f0f0000);
238
239 /* MR1: turn termination on early, for some reason.. */
240 if ((ram->base.mr[1] & 0x03c) != 0x030)
241 ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c);
242
243 if (vc == 1 && ram_have(fuc, gpio2E)) {
244 u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
245 if (temp != ram_rd32(fuc, gpio2E)) {
246 ram_wr32(fuc, gpiotrig, 1);
247 ram_nsec(fuc, 20000);
248 }
249 }
250
251 ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
252
253 ram_mask(fuc, 0x10f914, 0x01020000, 0x000c0000);
254 ram_mask(fuc, 0x10f910, 0x01020000, 0x000c0000);
255
256 ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
257 ram_nsec(fuc, 1000);
258 ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
259 ram_nsec(fuc, 1000);
260
261 ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
262 ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
263 ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
264 ram_wr32(fuc, 0x10f090, 0x00000061);
265 ram_wr32(fuc, 0x10f090, 0xc000007f);
266 ram_nsec(fuc, 1000);
267
268 ram_wr32(fuc, 0x10f698, 0x00000000);
269 ram_wr32(fuc, 0x10f69c, 0x00000000);
270
271 /*XXX: there does appear to be some kind of condition here, simply
272 * modifying these bits in the vbios from the default pl0
273 * entries shows no change. however, the data does appear to
274 * be correct and may be required for the transition back
275 */
276 mask = 0x800f07e0;
277 data = 0x00030000;
278 if (ram_rd32(fuc, 0x10f978) & 0x00800000)
279 data |= 0x00040000;
280
281 if (1) {
282 data |= 0x800807e0;
283 switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
284 case 0xc0: data &= ~0x00000040; break;
285 case 0x80: data &= ~0x00000100; break;
286 case 0x40: data &= ~0x80000000; break;
287 case 0x00: data &= ~0x00000400; break;
288 }
289
290 switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
291 case 0x30: data &= ~0x00000020; break;
292 case 0x20: data &= ~0x00000080; break;
293 case 0x10: data &= ~0x00080000; break;
294 case 0x00: data &= ~0x00000200; break;
295 }
296 }
297
298 if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
299 mask |= 0x03000000;
300 if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
301 mask |= 0x00002000;
302 if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
303 mask |= 0x00004000;
304 if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
305 mask |= 0x00000003;
306 else {
307 mask |= 0x34000000;
308 if (ram_rd32(fuc, 0x10f978) & 0x00800000)
309 mask |= 0x40000000;
310 }
311 ram_mask(fuc, 0x10f824, mask, data);
312
313 ram_mask(fuc, 0x132040, 0x00010000, 0x00000000);
314
315 if (ram->from == 2 && ram->mode != 2) {
316 ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
317 ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
318 ram_mask(fuc, 0x10f800, 0x00000000, 0x00000004);
319 ram_mask(fuc, 0x10f830, 0x00008000, 0x01040010);
320 ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
321 r1373f4_init(fuc);
322 ram_mask(fuc, 0x1373f0, 0x00000002, 0x00000001);
323 r1373f4_fini(fuc, ramcfg);
324 ram_mask(fuc, 0x10f830, 0x00c00000, 0x00240001);
325 } else
326 if (ram->from != 2 && ram->mode != 2) {
327 r1373f4_init(fuc);
328 r1373f4_fini(fuc, ramcfg);
329 }
330
331 if (ram_have(fuc, gpioMV)) {
332 u32 temp = ram_mask(fuc, gpioMV, 0x3000, fuc->r_funcMV[mv]);
333 if (temp != ram_rd32(fuc, gpioMV)) {
334 ram_wr32(fuc, gpiotrig, 1);
335 ram_nsec(fuc, 64000);
336 }
337 }
338
339 if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
340 (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
341 ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
342 ram_nsec(fuc, 20000);
343 }
344
345 if (ram->from != 2 && ram->mode == 2) {
346 ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
347 ram_mask(fuc, 0x1373f0, 0x00000000, 0x00000002);
348 ram_mask(fuc, 0x10f830, 0x00800001, 0x00408010);
349 r1373f4_init(fuc);
350 r1373f4_fini(fuc, ramcfg);
351 ram_mask(fuc, 0x10f808, 0x00000000, 0x00080000);
352 ram_mask(fuc, 0x10f200, 0x00808000, 0x00800000);
353 } else
354 if (ram->from == 2 && ram->mode == 2) {
355 ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
356 r1373f4_init(fuc);
357 r1373f4_fini(fuc, ramcfg);
358 }
359
360 if (ram->mode != 2) /*XXX*/ {
361 if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
362 ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
363 }
364
365 data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
366 ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
367 ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
368 ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
369
370 data = nv_ro08(bios, ramcfg + 0x04);
371 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
372 ram_wr32(fuc, 0x10f698, 0x01010101 * data);
373 ram_wr32(fuc, 0x10f69c, 0x01010101 * data);
374 }
375
376 if (ram->mode != 2) {
377 u32 temp = ram_rd32(fuc, 0x10f694) & ~0xff00ff00;
378 ram_wr32(fuc, 0x10f694, temp | (0x01000100 * data));
379 }
380
381 if (ram->mode == 2 && (nv_ro08(bios, ramcfg + 0x08) & 0x10))
382 data = 0x00000080;
383 else
384 data = 0x00000000;
385 ram_mask(fuc, 0x10f60c, 0x00000080, data);
386
387 mask = 0x00070000;
388 data = 0x00000000;
389 if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
390 data |= 0x03000000;
391 if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
392 data |= 0x00002000;
393 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
394 data |= 0x00004000;
395 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
396 data |= 0x00000003;
397 else
398 data |= 0x74000000;
399 ram_mask(fuc, 0x10f824, mask, data);
400
401 if (nv_ro08(bios, ramcfg + 0x01) & 0x08)
402 data = 0x00000000;
403 else
404 data = 0x00001000;
405 ram_mask(fuc, 0x10f200, 0x00001000, data);
406
407 if (ram_rd32(fuc, 0x10f670) & 0x80000000) {
408 ram_nsec(fuc, 10000);
409 ram_mask(fuc, 0x10f670, 0x80000000, 0x00000000);
410 }
411
412 if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
413 data = 0x00100000;
414 else
415 data = 0x00000000;
416 ram_mask(fuc, 0x10f82c, 0x00100000, data);
417
418 data = 0x00000000;
419 if (nv_ro08(bios, ramcfg + 0x08) & 0x08)
420 data |= 0x00002000;
421 if (nv_ro08(bios, ramcfg + 0x08) & 0x04)
422 data |= 0x00001000;
423 if (nv_ro08(bios, ramcfg + 0x08) & 0x02)
424 data |= 0x00004000;
425 ram_mask(fuc, 0x10f830, 0x00007000, data);
426
427 /* PFB timing */
428 ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
429 ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
430 ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
431 ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
432 ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
433 ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
434 ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
435 ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
436 ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
437 ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
438 ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
439
440 data = (nv_ro08(bios, ramcfg + 0x02) & 0x03) << 8;
441 if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
442 data |= 0x70000000;
443 ram_mask(fuc, 0x10f604, 0x70000300, data);
444
445 data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
446 if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
447 data |= 0x00000100;
448 ram_mask(fuc, 0x10f614, 0x70000000, data);
449
450 data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
451 if (nv_ro08(bios, ramcfg + 0x01) & 0x02)
452 data |= 0x00000100;
453 ram_mask(fuc, 0x10f610, 0x70000000, data);
454
455 mask = 0x33f00000;
456 data = 0x00000000;
457 if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
458 data |= 0x20200000;
459 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
460 data |= 0x12800000;
461 /*XXX: see note above about there probably being some condition
462 * for the 10f824 stuff that uses ramcfg 3...
463 */
464 if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
465 if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
466 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
467 mask |= 0x00000020;
468 else
469 data |= 0x00000020;
470 mask |= 0x00000004;
471 }
472 } else {
473 mask |= 0x40000020;
474 data |= 0x00000004;
475 }
476
477 ram_mask(fuc, 0x10f808, mask, data);
478
479 data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
480 ram_wr32(fuc, 0x10f870, 0x11111111 * data);
481
482 data = nv_ro08(bios, ramcfg + 0x02) & 0x03;
483 if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
484 data |= 0x00000004;
485 if ((nv_rd32(bios, 0x100770) & 0x00000004) != (data & 0x00000004)) {
486 ram_wr32(fuc, 0x10f750, 0x04000009);
487 ram_wr32(fuc, 0x100710, 0x00000000);
488 ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
489 }
490 ram_mask(fuc, 0x100770, 0x00000007, data);
491
492 data = (nv_ro08(bios, timing + 0x30) & 0x07) << 8;
493 if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
494 data |= 0x80000000;
495 ram_mask(fuc, 0x100778, 0x00000700, data);
496
497 data = nv_ro16(bios, timing + 0x2c);
498 ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) << 4);
499 ram_mask(fuc, 0x10f24c, 0x7f000000, (data & 0x1fc0) << 18);
500
501 data = nv_ro08(bios, timing + 0x30);
502 ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
503
504 data = nv_ro16(bios, timing + 0x31);
505 ram_mask(fuc, 0x10fec4, 0x041e0f07, (data & 0x0800) << 15 |
506 (data & 0x0780) << 10 |
507 (data & 0x0078) << 5 |
508 (data & 0x0007));
509 ram_mask(fuc, 0x10fec8, 0x00000027, (data & 0x8000) >> 10 |
510 (data & 0x7000) >> 12);
511
512 ram_wr32(fuc, 0x10f090, 0x4000007e);
513 ram_nsec(fuc, 1000);
514 ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
515 ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
516 ram_nsec(fuc, 2000);
517 ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
518
519 if ((nv_ro08(bios, ramcfg + 0x08) & 0x10) && (ram->mode == 2) /*XXX*/) {
520 u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
521 train(fuc, 0xa4010000); /*XXX*/
522 ram_nsec(fuc, 1000);
523 ram_wr32(fuc, 0x10f294, temp);
524 }
525
526 ram_mask(fuc, mr[3], 0xfff, ram->base.mr[3]);
527 ram_wr32(fuc, mr[0], ram->base.mr[0]);
528 ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]);
529 ram_nsec(fuc, 1000);
530 ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]);
531 ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5]);
532 ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]);
533 ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]);
534
535 if (vc == 0 && ram_have(fuc, gpio2E)) {
536 u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
537 if (temp != ram_rd32(fuc, gpio2E)) {
538 ram_wr32(fuc, gpiotrig, 1);
539 ram_nsec(fuc, 20000);
540 }
541 }
542
543 ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
544 ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
545 ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
546 ram_nsec(fuc, 1000);
547
548 data = ram_rd32(fuc, 0x10f978);
549 data &= ~0x00046144;
550 data |= 0x0000000b;
551 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
552 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x04))
553 data |= 0x0000200c;
554 else
555 data |= 0x00000000;
556 } else {
557 data |= 0x00040044;
558 }
559 ram_wr32(fuc, 0x10f978, data);
560
561 if (ram->mode == 1) {
562 data = ram_rd32(fuc, 0x10f830) | 0x00000001;
563 ram_wr32(fuc, 0x10f830, data);
564 }
565
566 if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
567 data = 0x88020000;
568 if ( (nv_ro08(bios, ramcfg + 0x07) & 0x04))
569 data |= 0x10000000;
570 if (!(nv_ro08(bios, rammap + 0x08) & 0x10))
571 data |= 0x00080000;
572 } else {
573 data = 0xa40e0000;
574 }
575 train(fuc, data);
576 ram_nsec(fuc, 1000);
577
578 if (ram->mode == 2) { /*XXX*/
579 ram_mask(fuc, 0x10f800, 0x00000004, 0x00000004);
580 }
581
582 /* MR5: (re)enable LP3 if necessary
583 * XXX: need to find the switch, keeping off for now
584 */
585 ram_mask(fuc, mr[5], 0x00000004, 0x00000000);
586
587 if (ram->mode != 2) {
588 ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
589 ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
590 }
591
592 if (nv_ro08(bios, ramcfg + 0x07) & 0x02) {
593 ram_mask(fuc, 0x10f910, 0x80020000, 0x01000000);
594 ram_mask(fuc, 0x10f914, 0x80020000, 0x01000000);
595 }
596
597 ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
598
599 if (nv_ro08(bios, rammap + 0x08) & 0x01)
600 data = 0x00000800;
601 else
602 data = 0x00000000;
603 ram_mask(fuc, 0x10f200, 0x00000800, data);
604 return 0;
605}
606
607/*******************************************************************************
608 * DDR3
609 ******************************************************************************/
610
/* Build the reclocking script that transitions DDR3 memory to the
 * target frequency.
 *
 * Nothing in here touches the hardware directly: every ram_wr32() /
 * ram_mask() / ram_nsec() / ram_wait() call appends an operation to
 * the ramfuc script, which is executed later via ram_exec() (see
 * nve0_ram_prog()).  The statement order below therefore *is* the
 * hardware sequence and must not be rearranged.
 *
 * Relies on nve0_ram_calc() having already located the rammap/ramcfg/
 * timing VBIOS entries and computed the refpll coefficients.
 *
 * Returns 0 once the script has been built.
 */
static int
nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
{
	struct nouveau_bios *bios = nouveau_bios(pfb);
	struct nve0_ram *ram = (void *)pfb->ram;
	struct nve0_ramfuc *fuc = &ram->fuc;
	/* refpll coefficients/fractional-N computed by nve0_ram_calc() */
	const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
	const u32 runk0 = ram->fN1 << 16;
	const u32 runk1 = ram->fN1;
	/* VBIOS table offsets located by nve0_ram_calc() */
	const u32 rammap = ram->base.rammap.data;
	const u32 ramcfg = ram->base.ramcfg.data;
	const u32 timing = ram->base.timing.data;
	/* ramcfg byte 2 bit 0x08 selects which state the 0x2e GPIO is
	 * driven to around the transition -- exact meaning unconfirmed */
	int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
	int mv = 1; /*XXX*/
	u32 mask, data;

	ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
	ram_wr32(fuc, 0x62c000, 0x0f0f0000);

	/* drive the 0x2e GPIO to its "1" function and pulse the trigger
	 * if that actually changes its current state */
	if (vc == 1 && ram_have(fuc, gpio2E)) {
		u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
		if (temp != ram_rd32(fuc, gpio2E)) {
			ram_wr32(fuc, gpiotrig, 1);
			ram_nsec(fuc, 20000);
		}
	}

	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
	if ((nv_ro08(bios, ramcfg + 0x03) & 0xf0))
		ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);

	/* quiesce the DRAM before the clock switch: precharge, stop
	 * auto-refresh, and issue manual refreshes */
	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
	ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
	ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
	ram_nsec(fuc, 1000);

	ram_wr32(fuc, 0x10f090, 0x00000060);
	ram_wr32(fuc, 0x10f090, 0xc000007e);

	/*XXX: there does appear to be some kind of condition here, simply
	 *     modifying these bits in the vbios from the default pl0
	 *     entries shows no change. however, the data does appear to
	 *     be correct and may be required for the transition back
	 */
	mask = 0x00010000;
	data = 0x00010000;

	if (1) {
		mask |= 0x800807e0;
		data |= 0x800807e0;
		/* ramcfg byte 3 bits 7:6 clear exactly one bit of data */
		switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
		case 0xc0: data &= ~0x00000040; break;
		case 0x80: data &= ~0x00000100; break;
		case 0x40: data &= ~0x80000000; break;
		case 0x00: data &= ~0x00000400; break;
		}

		/* ...and bits 5:4 clear one more */
		switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
		case 0x30: data &= ~0x00000020; break;
		case 0x20: data &= ~0x00000080; break;
		case 0x10: data &= ~0x00080000; break;
		case 0x00: data &= ~0x00000200; break;
		}
	}

	/* widen the 0x10f824 write mask per ramcfg feature bits */
	if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
		mask |= 0x03000000;
	if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
		mask |= 0x00002000;
	if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
		mask |= 0x00004000;
	if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
		mask |= 0x00000003;
	else
		mask |= 0x14000000;
	ram_mask(fuc, 0x10f824, mask, data);

	ram_mask(fuc, 0x132040, 0x00010000, 0x00000000);

	ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
	data = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
	data |= (nv_ro08(bios, ramcfg + 0x03) & 0x30) << 12;
	ram_wr32(fuc, 0x1373ec, data);
	ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
	ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);

	/* (re)program refpll, if required */
	if ((ram_rd32(fuc, 0x132024) & 0xffffffff) != rcoef ||
	    (ram_rd32(fuc, 0x132034) & 0x0000ffff) != runk1) {
		/* disable the PLL, load coefficients, re-enable, then
		 * wait for the lock indication before proceeding */
		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
		ram_mask(fuc, 0x132020, 0x00000001, 0x00000000);
		ram_wr32(fuc, 0x137320, 0x00000000);
		ram_mask(fuc, 0x132030, 0xffff0000, runk0);
		ram_mask(fuc, 0x132034, 0x0000ffff, runk1);
		ram_wr32(fuc, 0x132024, rcoef);
		ram_mask(fuc, 0x132028, 0x00080000, 0x00080000);
		ram_mask(fuc, 0x132020, 0x00000001, 0x00000001);
		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
		ram_mask(fuc, 0x132028, 0x00080000, 0x00000000);
	}

	ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000010);
	ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000001);
	ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);

	/* toggle the (presumed memory-voltage) GPIO if its target state
	 * differs from the current one -- meaning unconfirmed */
	if (ram_have(fuc, gpioMV)) {
		u32 temp = ram_mask(fuc, gpioMV, 0x3000, fuc->r_funcMV[mv]);
		if (temp != ram_rd32(fuc, gpioMV)) {
			ram_wr32(fuc, gpiotrig, 1);
			ram_nsec(fuc, 64000);
		}
	}

	if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
	     (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
		ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
		ram_nsec(fuc, 20000);
	}

	if (ram->mode != 2) /*XXX*/ {
		if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
			ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
	}

	/* per-partition config values replicated across byte lanes
	 * (0x11/0x01010101 multipliers spread a byte over the word) */
	data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
	ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
	ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
	ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));

	/* second 0x10f824 update: same feature bits as above, but now
	 * setting the data bits rather than widening the mask */
	mask = 0x00010000;
	data = 0x00000000;
	if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
		data |= 0x03000000;
	if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
		data |= 0x00002000;
	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
		data |= 0x00004000;
	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
		data |= 0x00000003;
	else
		data |= 0x14000000;
	ram_mask(fuc, 0x10f824, mask, data);
	ram_nsec(fuc, 1000);

	if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
		data = 0x00100000;
	else
		data = 0x00000000;
	ram_mask(fuc, 0x10f82c, 0x00100000, data);

	/* PFB timing */
	ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
	ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
	ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
	ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
	ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
	ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
	ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
	ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
	ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
	ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
	ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));

	mask = 0x33f00000;
	data = 0x00000000;
	if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
		data |= 0x20200000;
	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
		data |= 0x12800000;
	/*XXX: see note above about there probably being some condition
	 *     for the 10f824 stuff that uses ramcfg 3...
	 */
	if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
		if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
			if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
				mask |= 0x00000020;
			else
				data |= 0x00000020;
			mask |= 0x08000004;
		}
		data |= 0x04000000;
	} else {
		mask |= 0x44000020;
		data |= 0x08000004;
	}

	ram_mask(fuc, 0x10f808, mask, data);

	data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
	ram_wr32(fuc, 0x10f870, 0x11111111 * data);

	data = nv_ro16(bios, timing + 0x2c);
	ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) << 4);

	/* take the larger of the two candidate timing fields for
	 * 0x10f24c bits 30:24 */
	if (((nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >> 6) >
	    ((nv_ro32(bios, timing + 0x28) & 0x7f000000) >> 24))
		data = (nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >> 6;
	else
		data = (nv_ro32(bios, timing + 0x28) & 0x1f000000) >> 24;
	ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);

	data = nv_ro08(bios, timing + 0x30);
	ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);

	ram_wr32(fuc, 0x10f090, 0x4000007f);
	ram_nsec(fuc, 1000);

	/* bring the DRAM back up and re-enable auto-refresh */
	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
	ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
	ram_nsec(fuc, 1000);

	/* force an MR0 write and pulse bit 0x100 (DLL reset?) --
	 * ram_nuke() presumably invalidates the shadowed value so the
	 * subsequent writes are always emitted; TODO confirm */
	ram_nuke(fuc, mr[0]);
	ram_mask(fuc, mr[0], 0x100, 0x100);
	ram_mask(fuc, mr[0], 0x100, 0x000);

	/* reprogram the mode registers for the new frequency */
	ram_mask(fuc, mr[2], 0xfff, ram->base.mr[2]);
	ram_wr32(fuc, mr[0], ram->base.mr[0]);
	ram_nsec(fuc, 1000);

	/* second MR0 bit 0x100 pulse after the new values are in */
	ram_nuke(fuc, mr[0]);
	ram_mask(fuc, mr[0], 0x100, 0x100);
	ram_mask(fuc, mr[0], 0x100, 0x000);

	/* restore the 0x2e GPIO to its "0" function if requested */
	if (vc == 0 && ram_have(fuc, gpio2E)) {
		u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
		if (temp != ram_rd32(fuc, gpio2E)) {
			ram_wr32(fuc, gpiotrig, 1);
			ram_nsec(fuc, 20000);
		}
	}

	if (ram->mode != 2) {
		ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
	}

	ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
	ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
	ram_nsec(fuc, 1000);

	ram_wr32(fuc, 0x62c000, 0x0f0f0f00);

	if (nv_ro08(bios, rammap + 0x08) & 0x01)
		data = 0x00000800;
	else
		data = 0x00000000;
	ram_mask(fuc, 0x10f200, 0x00000800, data);
	return 0;
}
865
866/*******************************************************************************
867 * main hooks
868 ******************************************************************************/
869
/* Top-level reclock calculation: locate the VBIOS rammap/ramcfg/timing
 * entries for the target frequency, compute PLL coefficients, snapshot
 * the current mode registers, then dispatch to the per-memory-type
 * script builder (nve0_ram_calc_sddr3()/nve0_ram_calc_gddr5()).
 *
 * @pfb:  framebuffer subdev
 * @freq: target memory frequency (same unit as fed to the PLL calc;
 *        divided by 1000 for the rammap lookup)
 *
 * Returns 0 on success, -EINVAL on bad/missing VBIOS data, -ENOSYS for
 * unsupported memory types, or a script-builder error.
 */
static int
nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
{
	struct nouveau_bios *bios = nouveau_bios(pfb);
	struct nve0_ram *ram = (void *)pfb->ram;
	struct nve0_ramfuc *fuc = &ram->fuc;
	struct bit_entry M;
	int ret, refclk, strap, i;
	u32 data;
	u8 cnt;

	/* lookup memory config data relevant to the target frequency */
	ram->base.rammap.data = nvbios_rammap_match(bios, freq / 1000,
					&ram->base.rammap.version,
					&ram->base.rammap.size, &cnt,
					&ram->base.ramcfg.size);
	if (!ram->base.rammap.data || ram->base.rammap.version != 0x11 ||
	     ram->base.rammap.size < 0x09) {
		nv_error(pfb, "invalid/missing rammap entry\n");
		return -EINVAL;
	}

	/* locate specific data set for the attached memory */
	if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
		nv_error(pfb, "invalid/missing memory table\n");
		return -EINVAL;
	}

	/* raw strap from 0x101000, optionally remapped through the
	 * translation table pointed at by the 'M' BIT entry */
	strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
	data = nv_ro16(bios, M.offset + 1);
	if (data)
		strap = nv_ro08(bios, data + strap);

	if (strap >= cnt) {
		nv_error(pfb, "invalid ramcfg strap\n");
		return -EINVAL;
	}

	/* ramcfg entries are packed immediately after the rammap header,
	 * indexed by strap */
	ram->base.ramcfg.version = ram->base.rammap.version;
	ram->base.ramcfg.data = ram->base.rammap.data + ram->base.rammap.size +
			       (ram->base.ramcfg.size * strap);
	if (!ram->base.ramcfg.data || ram->base.ramcfg.version != 0x11 ||
	     ram->base.ramcfg.size < 0x08) {
		nv_error(pfb, "invalid/missing ramcfg entry\n");
		return -EINVAL;
	}

	/* lookup memory timings, if bios says they're present */
	strap = nv_ro08(bios, ram->base.ramcfg.data + 0x00);
	if (strap != 0xff) {
		ram->base.timing.data =
			nvbios_timing_entry(bios, strap,
					    &ram->base.timing.version,
					    &ram->base.timing.size);
		if (!ram->base.timing.data ||
		     ram->base.timing.version != 0x20 ||
		     ram->base.timing.size < 0x33) {
			nv_error(pfb, "invalid/missing timing entry\n");
			return -EINVAL;
		}
	} else {
		/* 0xff strap: no timing entry, script readers treat 0 as
		 * "absent" */
		ram->base.timing.data = 0;
	}

	ret = ram_init(fuc, pfb);
	if (ret)
		return ret;

	/* mode 2 = target beyond refpll vco1 range, mempll required;
	 * ram->from records the currently-selected clock source */
	ram->mode = (freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
	ram->from = ram_rd32(fuc, 0x1373f4) & 0x0000000f;

	/* XXX: this is *not* what nvidia do.  on fermi nvidia generally
	 * select, based on some unknown condition, one of the two possible
	 * reference frequencies listed in the vbios table for mempll and
	 * program refpll to that frequency.
	 *
	 * so far, i've seen very weird values being chosen by nvidia on
	 * kepler boards, no idea how/why they're chosen.
	 */
	refclk = freq;
	if (ram->mode == 2)
		refclk = fuc->mempll.refclk;

	/* calculate refpll coefficients */
	ret = nva3_pll_calc(nv_subdev(pfb), &fuc->refpll, refclk, &ram->N1,
			   &ram->fN1, &ram->M1, &ram->P1);
	/* nva3_pll_calc() returns the achieved frequency (or <= 0 on
	 * failure); it becomes mempll's reference clock */
	fuc->mempll.refclk = ret;
	if (ret <= 0) {
		nv_error(pfb, "unable to calc refpll\n");
		return -EINVAL;
	}

	/* calculate mempll coefficients, if we're using it */
	if (ram->mode == 2) {
		/* post-divider doesn't work... the reg takes the values but
		 * appears to completely ignore it.  there *is* a bit at
		 * bit 28 that appears to divide the clock by 2 if set.
		 */
		fuc->mempll.min_p = 1;
		fuc->mempll.max_p = 2;

		ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, freq,
				   &ram->N2, NULL, &ram->M2, &ram->P2);
		if (ret <= 0) {
			nv_error(pfb, "unable to calc mempll\n");
			return -EINVAL;
		}
	}

	/* snapshot current mode register values for the script builders */
	for (i = 0; i < ARRAY_SIZE(fuc->r_mr); i++) {
		if (ram_have(fuc, mr[i]))
			ram->base.mr[i] = ram_rd32(fuc, mr[i]);
	}

	/* compute new mode-register values, then build the script */
	switch (ram->base.type) {
	case NV_MEM_TYPE_DDR3:
		ret = nouveau_sddr3_calc(&ram->base);
		if (ret == 0)
			ret = nve0_ram_calc_sddr3(pfb, freq);
		break;
	case NV_MEM_TYPE_GDDR5:
		ret = nouveau_gddr5_calc(&ram->base);
		if (ret == 0)
			ret = nve0_ram_calc_gddr5(pfb, freq);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
1002
1003static int
1004nve0_ram_prog(struct nouveau_fb *pfb)
1005{
1006 struct nouveau_device *device = nv_device(pfb);
1007 struct nve0_ram *ram = (void *)pfb->ram;
1008 struct nve0_ramfuc *fuc = &ram->fuc;
1009 ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
1010 return 0;
1011}
1012
1013static void
1014nve0_ram_tidy(struct nouveau_fb *pfb)
1015{
1016 struct nve0_ram *ram = (void *)pfb->ram;
1017 struct nve0_ramfuc *fuc = &ram->fuc;
1018 ram_exec(fuc, false);
1019}
1020
/* One-time RAM initialisation: run the VBIOS rammap init scripts and,
 * for GDDR5, upload the link-training data patterns.
 *
 * Returns 0 on success, -EINVAL if the rammap table header is
 * missing/too short, or an error from nouveau_ram_init().
 */
static int
nve0_ram_init(struct nouveau_object *object)
{
	struct nouveau_fb *pfb = (void *)object->parent;
	struct nve0_ram *ram = (void *)object;
	struct nouveau_bios *bios = nouveau_bios(pfb);
	/* training byte patterns (12-entry cycle) and their 32-bit
	 * counterparts -- complementary pairs */
	static const u8 train0[] = {
		0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
		0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
	};
	static const u32 train1[] = {
		0x00000000, 0xffffffff,
		0x55555555, 0xaaaaaaaa,
		0x33333333, 0xcccccccc,
		0xf0f0f0f0, 0x0f0f0f0f,
		0x00ff00ff, 0xff00ff00,
		0x0000ffff, 0xffff0000,
	};
	u8 ver, hdr, cnt, len, snr, ssz;
	u32 data, save;
	int ret, i;

	ret = nouveau_ram_init(&ram->base);
	if (ret)
		return ret;

	/* run a bunch of tables from rammap table.  there's actually
	 * individual pointers for each rammap entry too, but, nvidia
	 * seem to just run the last two entries' scripts early on in
	 * their init, and never again..  we'll just run 'em all once
	 * for now.
	 *
	 * i strongly suspect that each script is for a separate mode
	 * (likely selected by 0x10f65c's lower bits?), and the
	 * binary driver skips the one that's already been setup by
	 * the init tables.
	 */
	data = nvbios_rammap_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
	if (!data || hdr < 0x15)
		return -EINVAL;

	cnt  = nv_ro08(bios, data + 0x14); /* guess at count */
	data = nv_ro32(bios, data + 0x10); /* guess u32... */
	/* preserve 0x10f65c across the script runs -- each iteration
	 * selects a different mode in bits 7:4 before executing */
	save = nv_rd32(pfb, 0x10f65c);
	for (i = 0; i < cnt; i++) {
		nv_mask(pfb, 0x10f65c, 0x000000f0, i << 4);
		nvbios_exec(&(struct nvbios_init) {
				.subdev = nv_subdev(pfb),
				.bios = bios,
				.offset = nv_ro32(bios, data), /* guess u32 */
				.execute = 1,
			    });
		data += 4;
	}
	nv_wr32(pfb, 0x10f65c, save);

	switch (ram->base.type) {
	case NV_MEM_TYPE_GDDR5:
		/* load the training patterns into both pattern RAMs,
		 * cycling through the 12-entry tables above */
		for (i = 0; i < 0x30; i++) {
			nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
			nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
			nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
			nv_wr32(pfb, 0x10f918,              train1[i % 12]);

			nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
			nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
			nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
		}

		/* fill 0x10f900 via both index registers with an
		 * alternating 0x55555555/0xaaaaaaaa pattern */
		for (i = 0; i < 0x100; i++) {
			nv_wr32(pfb, 0x10f968, i);
			nv_wr32(pfb, 0x10f900, train1[2 + (i & 1)]);
		}

		for (i = 0; i < 0x100; i++) {
			nv_wr32(pfb, 0x10f96c, i);
			nv_wr32(pfb, 0x10f900, train1[2 + (i & 1)]);
		}
		break;
	default:
		break;
	}

	return 0;
}
1109
/* Constructor: detect the RAM type, install the reclocking hooks for
 * supported types (DDR3/GDDR5), parse the ref/mem PLL limits from the
 * VBIOS, resolve the voltage-related GPIOs, and bind every register
 * the ramfuc scripts may touch.
 *
 * Returns 0 on success, or an error from nvc0_ram_create() /
 * nvbios_pll_parse().
 */
static int
nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nouveau_fb *pfb = nouveau_fb(parent);
	struct nouveau_bios *bios = nouveau_bios(pfb);
	struct nouveau_gpio *gpio = nouveau_gpio(pfb);
	struct dcb_gpio_func func;
	struct nve0_ram *ram;
	int ret;

	ret = nvc0_ram_create(parent, engine, oclass, &ram);
	*pobject = nv_object(ram);
	if (ret)
		return ret;

	/* only DDR3 and GDDR5 have reclocking support here; other
	 * types still probe, but keep the default (no-op) hooks */
	switch (ram->base.type) {
	case NV_MEM_TYPE_DDR3:
	case NV_MEM_TYPE_GDDR5:
		ram->base.calc = nve0_ram_calc;
		ram->base.prog = nve0_ram_prog;
		ram->base.tidy = nve0_ram_tidy;
		break;
	default:
		nv_warn(pfb, "reclocking of this RAM type is unsupported\n");
		break;
	}

	/* parse bios data for both pll's */
	ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
	if (ret) {
		nv_error(pfb, "mclk refpll data not found\n");
		return ret;
	}

	ret = nvbios_pll_parse(bios, 0x04, &ram->fuc.mempll);
	if (ret) {
		nv_error(pfb, "mclk pll data not found\n");
		return ret;
	}

	/* DCB GPIO function 0x18 (memory voltage?) -- optional; scripts
	 * check ram_have() before using it */
	ret = gpio->find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
	if (ret == 0) {
		ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
		ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12;
		ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12;
	}

	/* DCB GPIO function 0x2e -- also optional */
	ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
	if (ret == 0) {
		ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04));
		ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12;
		ram->fuc.r_func2E[1] = (func.log[1] ^ 2) << 12;
	}

	ram->fuc.r_gpiotrig = ramfuc_reg(0x00d604);

	/* refpll / mempll control registers */
	ram->fuc.r_0x132020 = ramfuc_reg(0x132020);
	ram->fuc.r_0x132028 = ramfuc_reg(0x132028);
	ram->fuc.r_0x132024 = ramfuc_reg(0x132024);
	ram->fuc.r_0x132030 = ramfuc_reg(0x132030);
	ram->fuc.r_0x132034 = ramfuc_reg(0x132034);
	ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
	ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
	ram->fuc.r_0x132040 = ramfuc_reg(0x132040);

	/* PFB timing registers (written from the VBIOS timing entry) */
	ram->fuc.r_0x10f248 = ramfuc_reg(0x10f248);
	ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
	ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
	ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
	ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
	ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);
	ram->fuc.r_0x10f2a4 = ramfuc_reg(0x10f2a4);
	ram->fuc.r_0x10f2a8 = ramfuc_reg(0x10f2a8);
	ram->fuc.r_0x10f2ac = ramfuc_reg(0x10f2ac);
	ram->fuc.r_0x10f2cc = ramfuc_reg(0x10f2cc);
	ram->fuc.r_0x10f2e8 = ramfuc_reg(0x10f2e8);
	ram->fuc.r_0x10f250 = ramfuc_reg(0x10f250);
	ram->fuc.r_0x10f24c = ramfuc_reg(0x10f24c);
	ram->fuc.r_0x10fec4 = ramfuc_reg(0x10fec4);
	ram->fuc.r_0x10fec8 = ramfuc_reg(0x10fec8);
	ram->fuc.r_0x10f604 = ramfuc_reg(0x10f604);
	ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
	ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
	ram->fuc.r_0x100770 = ramfuc_reg(0x100770);
	ram->fuc.r_0x100778 = ramfuc_reg(0x100778);
	ram->fuc.r_0x10f224 = ramfuc_reg(0x10f224);

	/* remaining registers touched by the reclocking scripts */
	ram->fuc.r_0x10f870 = ramfuc_reg(0x10f870);
	ram->fuc.r_0x10f698 = ramfuc_reg(0x10f698);
	ram->fuc.r_0x10f694 = ramfuc_reg(0x10f694);
	ram->fuc.r_0x10f6b8 = ramfuc_reg(0x10f6b8);
	ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
	ram->fuc.r_0x10f670 = ramfuc_reg(0x10f670);
	ram->fuc.r_0x10f60c = ramfuc_reg(0x10f60c);
	ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
	ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
	ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
	ram->fuc.r_0x10f82c = ramfuc_reg(0x10f82c);

	ram->fuc.r_0x10f978 = ramfuc_reg(0x10f978);
	ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
	ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);

	/* mode-register locations differ per memory type */
	switch (ram->base.type) {
	case NV_MEM_TYPE_GDDR5:
		ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
		ram->fuc.r_mr[1] = ramfuc_reg(0x10f330);
		ram->fuc.r_mr[2] = ramfuc_reg(0x10f334);
		ram->fuc.r_mr[3] = ramfuc_reg(0x10f338);
		ram->fuc.r_mr[4] = ramfuc_reg(0x10f33c);
		ram->fuc.r_mr[5] = ramfuc_reg(0x10f340);
		ram->fuc.r_mr[6] = ramfuc_reg(0x10f344);
		ram->fuc.r_mr[7] = ramfuc_reg(0x10f348);
		ram->fuc.r_mr[8] = ramfuc_reg(0x10f354);
		ram->fuc.r_mr[15] = ramfuc_reg(0x10f34c);
		break;
	case NV_MEM_TYPE_DDR3:
		ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
		ram->fuc.r_mr[2] = ramfuc_reg(0x10f320);
		break;
	default:
		break;
	}

	ram->fuc.r_0x62c000 = ramfuc_reg(0x62c000);
	ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
	ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
	ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
	ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
	ram->fuc.r_0x10f318 = ramfuc_reg(0x10f318);
	ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
	ram->fuc.r_0x10f69c = ramfuc_reg(0x10f69c);
	ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
	ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
	ram->fuc.r_0x1373f4 = ramfuc_reg(0x1373f4);
	ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
	ram->fuc.r_0x10f65c = ramfuc_reg(0x10f65c);
	ram->fuc.r_0x10f6bc = ramfuc_reg(0x10f6bc);
	ram->fuc.r_0x100710 = ramfuc_reg(0x100710);
	ram->fuc.r_0x10f750 = ramfuc_reg(0x10f750);
	return 0;
}
1254
/* Object class for NVE0 (Kepler) RAM: wires up the constructor and
 * init hooks above, with generic destructor/fini from the common
 * nouveau_ram code. */
struct nouveau_oclass
nve0_ram_oclass = {
	.handle = 0,
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nve0_ram_ctor,
		.dtor = _nouveau_ram_dtor,
		.init = nve0_ram_init,
		.fini = _nouveau_ram_fini,
	}
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h
new file mode 100644
index 000000000000..571077e39071
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h
@@ -0,0 +1,18 @@
1#ifndef __NVKM_FBRAM_SEQ_H__
2#define __NVKM_FBRAM_SEQ_H__
3
4#include <subdev/bus.h>
5#include <subdev/bus/hwsq.h>
6
7#define ram_init(s,p) hwsq_init(&(s)->base, (p))
8#define ram_exec(s,e) hwsq_exec(&(s)->base, (e))
9#define ram_have(s,r) ((s)->r_##r.addr != 0x000000)
10#define ram_rd32(s,r) hwsq_rd32(&(s)->base, &(s)->r_##r)
11#define ram_wr32(s,r,d) hwsq_wr32(&(s)->base, &(s)->r_##r, (d))
12#define ram_nuke(s,r) hwsq_nuke(&(s)->base, &(s)->r_##r)
13#define ram_mask(s,r,m,d) hwsq_mask(&(s)->base, &(s)->r_##r, (m), (d))
14#define ram_setf(s,f,d) hwsq_setf(&(s)->base, (f), (d))
15#define ram_wait(s,f,d) hwsq_wait(&(s)->base, (f), (d))
16#define ram_nsec(s,n) hwsq_nsec(&(s)->base, (n))
17
18#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
new file mode 100644
index 000000000000..ebd4cd9c35d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
@@ -0,0 +1,99 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <subdev/bios.h>
26#include "priv.h"
27
28struct ramxlat {
29 int id;
30 u8 enc;
31};
32
33static inline int
34ramxlat(const struct ramxlat *xlat, int id)
35{
36 while (xlat->id >= 0) {
37 if (xlat->id == id)
38 return xlat->enc;
39 xlat++;
40 }
41 return -EINVAL;
42}
43
44static const struct ramxlat
45ramddr3_cl[] = {
46 { 5, 2 }, { 6, 4 }, { 7, 6 }, { 8, 8 }, { 9, 10 }, { 10, 12 },
47 { 11, 14 },
48 /* the below are mentioned in some, but not all, ddr3 docs */
49 { 12, 1 }, { 13, 3 }, { 14, 5 },
50 { -1 }
51};
52
53static const struct ramxlat
54ramddr3_wr[] = {
55 { 5, 1 }, { 6, 2 }, { 7, 3 }, { 8, 4 }, { 10, 5 }, { 12, 6 },
56 /* the below are mentioned in some, but not all, ddr3 docs */
57 { 14, 7 }, { 16, 0 },
58 { -1 }
59};
60
61static const struct ramxlat
62ramddr3_cwl[] = {
63 { 5, 0 }, { 6, 1 }, { 7, 2 }, { 8, 3 },
64 /* the below are mentioned in some, but not all, ddr3 docs */
65 { 9, 4 },
66 { -1 }
67};
68
69int
70nouveau_sddr3_calc(struct nouveau_ram *ram)
71{
72 struct nouveau_bios *bios = nouveau_bios(ram);
73 int WL, CL, WR;
74
75 switch (!!ram->timing.data * ram->timing.version) {
76 case 0x20:
77 WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7;
78 CL = nv_ro08(bios, ram->timing.data + 0x04) & 0x1f;
79 WR = nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f;
80 break;
81 default:
82 return -ENOSYS;
83 }
84
85 WL = ramxlat(ramddr3_cwl, WL);
86 CL = ramxlat(ramddr3_cl, CL);
87 WR = ramxlat(ramddr3_wr, WR);
88 if (WL < 0 || CL < 0 || WR < 0)
89 return -EINVAL;
90
91 ram->mr[0] &= ~0xe74;
92 ram->mr[0] |= (WR & 0x07) << 9;
93 ram->mr[0] |= (CL & 0x0e) << 3;
94 ram->mr[0] |= (CL & 0x01) << 2;
95
96 ram->mr[2] &= ~0x038;
97 ram->mr[2] |= (WL & 0x07) << 3;
98 return 0;
99}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index d422acc9af15..f572c2804c32 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -67,7 +67,7 @@ nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
67 } 67 }
68 } 68 }
69 69
70 return -EINVAL; 70 return -ENOENT;
71} 71}
72 72
73static int 73static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 2895c19bb152..041fd5edaebf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -195,7 +195,7 @@ nouveau_i2c_find_type(struct nouveau_i2c *i2c, u16 type)
195 195
196static int 196static int
197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, 197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
198 struct i2c_board_info *info, 198 struct nouveau_i2c_board_info *info,
199 bool (*match)(struct nouveau_i2c_port *, 199 bool (*match)(struct nouveau_i2c_port *,
200 struct i2c_board_info *)) 200 struct i2c_board_info *))
201{ 201{
@@ -208,12 +208,29 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
208 } 208 }
209 209
210 nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index); 210 nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
211 for (i = 0; info[i].addr; i++) { 211 for (i = 0; info[i].dev.addr; i++) {
212 if (nv_probe_i2c(port, info[i].addr) && 212 u8 orig_udelay = 0;
213 (!match || match(port, &info[i]))) { 213
214 nv_info(i2c, "detected %s: %s\n", what, info[i].type); 214 if ((port->adapter.algo == &i2c_bit_algo) &&
215 (info[i].udelay != 0)) {
216 struct i2c_algo_bit_data *algo = port->adapter.algo_data;
217 nv_debug(i2c, "using custom udelay %d instead of %d\n",
218 info[i].udelay, algo->udelay);
219 orig_udelay = algo->udelay;
220 algo->udelay = info[i].udelay;
221 }
222
223 if (nv_probe_i2c(port, info[i].dev.addr) &&
224 (!match || match(port, &info[i].dev))) {
225 nv_info(i2c, "detected %s: %s\n", what,
226 info[i].dev.type);
215 return i; 227 return i;
216 } 228 }
229
230 if (orig_udelay) {
231 struct i2c_algo_bit_data *algo = port->adapter.algo_data;
232 algo->udelay = orig_udelay;
233 }
217 } 234 }
218 235
219 nv_debug(i2c, "no devices found.\n"); 236 nv_debug(i2c, "no devices found.\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index e290cfa4acee..b4b9943773bc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -25,38 +25,48 @@
25#include <subdev/mc.h> 25#include <subdev/mc.h>
26#include <core/option.h> 26#include <core/option.h>
27 27
28static inline u32
29nouveau_mc_intr_mask(struct nouveau_mc *pmc)
30{
31 u32 intr = nv_rd32(pmc, 0x000100);
32 if (intr == 0xffffffff) /* likely fallen off the bus */
33 intr = 0x00000000;
34 return intr;
35}
36
28static irqreturn_t 37static irqreturn_t
29nouveau_mc_intr(int irq, void *arg) 38nouveau_mc_intr(int irq, void *arg)
30{ 39{
31 struct nouveau_mc *pmc = arg; 40 struct nouveau_mc *pmc = arg;
32 const struct nouveau_mc_intr *map = pmc->intr_map; 41 const struct nouveau_mc_oclass *oclass = (void *)nv_object(pmc)->oclass;
33 struct nouveau_device *device = nv_device(pmc); 42 const struct nouveau_mc_intr *map = oclass->intr;
34 struct nouveau_subdev *unit; 43 struct nouveau_subdev *unit;
35 u32 stat, intr; 44 u32 intr;
36
37 intr = stat = nv_rd32(pmc, 0x000100);
38 if (intr == 0xffffffff)
39 return IRQ_NONE;
40 while (stat && map->stat) {
41 if (stat & map->stat) {
42 unit = nouveau_subdev(pmc, map->unit);
43 if (unit && unit->intr)
44 unit->intr(unit);
45 intr &= ~map->stat;
46 }
47 map++;
48 }
49 45
46 nv_wr32(pmc, 0x000140, 0x00000000);
47 nv_rd32(pmc, 0x000140);
48 intr = nouveau_mc_intr_mask(pmc);
50 if (pmc->use_msi) 49 if (pmc->use_msi)
51 nv_wr08(pmc->base.base.parent, 0x00088068, 0xff); 50 oclass->msi_rearm(pmc);
52 51
53 if (intr) { 52 if (intr) {
54 nv_error(pmc, "unknown intr 0x%08x\n", stat); 53 u32 stat = intr = nouveau_mc_intr_mask(pmc);
54 while (map->stat) {
55 if (intr & map->stat) {
56 unit = nouveau_subdev(pmc, map->unit);
57 if (unit && unit->intr)
58 unit->intr(unit);
59 stat &= ~map->stat;
60 }
61 map++;
62 }
63
64 if (stat)
65 nv_error(pmc, "unknown intr 0x%08x\n", stat);
55 } 66 }
56 67
57 if (stat == IRQ_HANDLED) 68 nv_wr32(pmc, 0x000140, 0x00000001);
58 pm_runtime_mark_last_busy(&device->pdev->dev); 69 return intr ? IRQ_HANDLED : IRQ_NONE;
59 return stat ? IRQ_HANDLED : IRQ_NONE;
60} 70}
61 71
62int 72int
@@ -91,37 +101,42 @@ _nouveau_mc_dtor(struct nouveau_object *object)
91 101
92int 102int
93nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, 103nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
94 struct nouveau_oclass *oclass, 104 struct nouveau_oclass *bclass, int length, void **pobject)
95 const struct nouveau_mc_intr *intr_map,
96 int length, void **pobject)
97{ 105{
106 const struct nouveau_mc_oclass *oclass = (void *)bclass;
98 struct nouveau_device *device = nv_device(parent); 107 struct nouveau_device *device = nv_device(parent);
99 struct nouveau_mc *pmc; 108 struct nouveau_mc *pmc;
100 int ret; 109 int ret;
101 110
102 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PMC", 111 ret = nouveau_subdev_create_(parent, engine, bclass, 0, "PMC",
103 "master", length, pobject); 112 "master", length, pobject);
104 pmc = *pobject; 113 pmc = *pobject;
105 if (ret) 114 if (ret)
106 return ret; 115 return ret;
107 116
108 pmc->intr_map = intr_map;
109
110 switch (device->pdev->device & 0x0ff0) { 117 switch (device->pdev->device & 0x0ff0) {
111 case 0x00f0: /* BR02? */ 118 case 0x00f0:
112 case 0x02e0: /* BR02? */ 119 case 0x02e0:
113 pmc->use_msi = false; 120 /* BR02? NFI how these would be handled yet exactly */
114 break; 121 break;
115 default: 122 default:
116 pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", false); 123 switch (device->chipset) {
124 case 0xaa: break; /* reported broken, nv also disable it */
125 default:
126 pmc->use_msi = true;
127 break;
128 }
129 }
130
131 pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", pmc->use_msi);
132 if (pmc->use_msi && oclass->msi_rearm) {
133 pmc->use_msi = pci_enable_msi(device->pdev) == 0;
117 if (pmc->use_msi) { 134 if (pmc->use_msi) {
118 pmc->use_msi = pci_enable_msi(device->pdev) == 0; 135 nv_info(pmc, "MSI interrupts enabled\n");
119 if (pmc->use_msi) { 136 oclass->msi_rearm(pmc);
120 nv_info(pmc, "MSI interrupts enabled\n");
121 nv_wr08(device, 0x00088068, 0xff);
122 }
123 } 137 }
124 break; 138 } else {
139 pmc->use_msi = false;
125 } 140 }
126 141
127 ret = request_irq(device->pdev->irq, nouveau_mc_intr, 142 ret = request_irq(device->pdev->irq, nouveau_mc_intr,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 64aa4edb0d9d..2d787e4dfefa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -22,17 +22,14 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/mc.h> 25#include "nv04.h"
26
27struct nv04_mc_priv {
28 struct nouveau_mc base;
29};
30 26
31const struct nouveau_mc_intr 27const struct nouveau_mc_intr
32nv04_mc_intr[] = { 28nv04_mc_intr[] = {
33 { 0x00000001, NVDEV_ENGINE_MPEG }, /* NV17- MPEG/ME */ 29 { 0x00000001, NVDEV_ENGINE_MPEG }, /* NV17- MPEG/ME */
34 { 0x00000100, NVDEV_ENGINE_FIFO }, 30 { 0x00000100, NVDEV_ENGINE_FIFO },
35 { 0x00001000, NVDEV_ENGINE_GR }, 31 { 0x00001000, NVDEV_ENGINE_GR },
32 { 0x00010000, NVDEV_ENGINE_DISP },
36 { 0x00020000, NVDEV_ENGINE_VP }, /* NV40- */ 33 { 0x00020000, NVDEV_ENGINE_VP }, /* NV40- */
37 { 0x00100000, NVDEV_SUBDEV_TIMER }, 34 { 0x00100000, NVDEV_SUBDEV_TIMER },
38 { 0x01000000, NVDEV_ENGINE_DISP }, /* NV04- PCRTC0 */ 35 { 0x01000000, NVDEV_ENGINE_DISP }, /* NV04- PCRTC0 */
@@ -42,7 +39,18 @@ nv04_mc_intr[] = {
42 {} 39 {}
43}; 40};
44 41
45static int 42int
43nv04_mc_init(struct nouveau_object *object)
44{
45 struct nv04_mc_priv *priv = (void *)object;
46
47 nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
48 nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
49
50 return nouveau_mc_init(&priv->base);
51}
52
53int
46nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 54nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
47 struct nouveau_oclass *oclass, void *data, u32 size, 55 struct nouveau_oclass *oclass, void *data, u32 size,
48 struct nouveau_object **pobject) 56 struct nouveau_object **pobject)
@@ -50,7 +58,7 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nv04_mc_priv *priv; 58 struct nv04_mc_priv *priv;
51 int ret; 59 int ret;
52 60
53 ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); 61 ret = nouveau_mc_create(parent, engine, oclass, &priv);
54 *pobject = nv_object(priv); 62 *pobject = nv_object(priv);
55 if (ret) 63 if (ret)
56 return ret; 64 return ret;
@@ -58,24 +66,14 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
58 return 0; 66 return 0;
59} 67}
60 68
61int 69struct nouveau_oclass *
62nv04_mc_init(struct nouveau_object *object) 70nv04_mc_oclass = &(struct nouveau_mc_oclass) {
63{ 71 .base.handle = NV_SUBDEV(MC, 0x04),
64 struct nv04_mc_priv *priv = (void *)object; 72 .base.ofuncs = &(struct nouveau_ofuncs) {
65
66 nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
67 nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
68
69 return nouveau_mc_init(&priv->base);
70}
71
72struct nouveau_oclass
73nv04_mc_oclass = {
74 .handle = NV_SUBDEV(MC, 0x04),
75 .ofuncs = &(struct nouveau_ofuncs) {
76 .ctor = nv04_mc_ctor, 73 .ctor = nv04_mc_ctor,
77 .dtor = _nouveau_mc_dtor, 74 .dtor = _nouveau_mc_dtor,
78 .init = nv04_mc_init, 75 .init = nv04_mc_init,
79 .fini = _nouveau_mc_fini, 76 .fini = _nouveau_mc_fini,
80 }, 77 },
81}; 78 .intr = nv04_mc_intr,
79}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
new file mode 100644
index 000000000000..b0d5c31606c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
@@ -0,0 +1,21 @@
1#ifndef __NVKM_MC_NV04_H__
2#define __NVKM_MC_NV04_H__
3
4#include <subdev/mc.h>
5
6struct nv04_mc_priv {
7 struct nouveau_mc base;
8};
9
10int nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *,
11 struct nouveau_oclass *, void *, u32,
12 struct nouveau_object **);
13
14extern const struct nouveau_mc_intr nv04_mc_intr[];
15int nv04_mc_init(struct nouveau_object *);
16void nv40_mc_msi_rearm(struct nouveau_mc *);
17int nv50_mc_init(struct nouveau_object *);
18extern const struct nouveau_mc_intr nv50_mc_intr[];
19extern const struct nouveau_mc_intr nvc0_mc_intr[];
20
21#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c
new file mode 100644
index 000000000000..5b1faecfed2d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv04.h"
26
27void
28nv40_mc_msi_rearm(struct nouveau_mc *pmc)
29{
30 struct nv04_mc_priv *priv = (void *)pmc;
31 nv_wr08(priv, 0x088068, 0xff);
32}
33
34struct nouveau_oclass *
35nv40_mc_oclass = &(struct nouveau_mc_oclass) {
36 .base.handle = NV_SUBDEV(MC, 0x40),
37 .base.ofuncs = &(struct nouveau_ofuncs) {
38 .ctor = nv04_mc_ctor,
39 .dtor = _nouveau_mc_dtor,
40 .init = nv04_mc_init,
41 .fini = _nouveau_mc_fini,
42 },
43 .intr = nv04_mc_intr,
44 .msi_rearm = nv40_mc_msi_rearm,
45}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index d9891782bf28..3bfee5c6c4f2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -22,32 +22,12 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/mc.h> 25#include "nv04.h"
26
27struct nv44_mc_priv {
28 struct nouveau_mc base;
29};
30
31static int
32nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
33 struct nouveau_oclass *oclass, void *data, u32 size,
34 struct nouveau_object **pobject)
35{
36 struct nv44_mc_priv *priv;
37 int ret;
38
39 ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
40 *pobject = nv_object(priv);
41 if (ret)
42 return ret;
43
44 return 0;
45}
46 26
47static int 27static int
48nv44_mc_init(struct nouveau_object *object) 28nv44_mc_init(struct nouveau_object *object)
49{ 29{
50 struct nv44_mc_priv *priv = (void *)object; 30 struct nv04_mc_priv *priv = (void *)object;
51 u32 tmp = nv_rd32(priv, 0x10020c); 31 u32 tmp = nv_rd32(priv, 0x10020c);
52 32
53 nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */ 33 nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
@@ -60,13 +40,15 @@ nv44_mc_init(struct nouveau_object *object)
60 return nouveau_mc_init(&priv->base); 40 return nouveau_mc_init(&priv->base);
61} 41}
62 42
63struct nouveau_oclass 43struct nouveau_oclass *
64nv44_mc_oclass = { 44nv44_mc_oclass = &(struct nouveau_mc_oclass) {
65 .handle = NV_SUBDEV(MC, 0x44), 45 .base.handle = NV_SUBDEV(MC, 0x44),
66 .ofuncs = &(struct nouveau_ofuncs) { 46 .base.ofuncs = &(struct nouveau_ofuncs) {
67 .ctor = nv44_mc_ctor, 47 .ctor = nv04_mc_ctor,
68 .dtor = _nouveau_mc_dtor, 48 .dtor = _nouveau_mc_dtor,
69 .init = nv44_mc_init, 49 .init = nv44_mc_init,
70 .fini = _nouveau_mc_fini, 50 .fini = _nouveau_mc_fini,
71 }, 51 },
72}; 52 .intr = nv04_mc_intr,
53 .msi_rearm = nv40_mc_msi_rearm,
54}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 2b1afe225db8..e8822a934c48 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -22,13 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/mc.h> 25#include "nv04.h"
26 26
27struct nv50_mc_priv { 27const struct nouveau_mc_intr
28 struct nouveau_mc base;
29};
30
31static const struct nouveau_mc_intr
32nv50_mc_intr[] = { 28nv50_mc_intr[] = {
33 { 0x00000001, NVDEV_ENGINE_MPEG }, 29 { 0x00000001, NVDEV_ENGINE_MPEG },
34 { 0x00000100, NVDEV_ENGINE_FIFO }, 30 { 0x00000100, NVDEV_ENGINE_FIFO },
@@ -45,37 +41,30 @@ nv50_mc_intr[] = {
45 {}, 41 {},
46}; 42};
47 43
48static int 44static void
49nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 45nv50_mc_msi_rearm(struct nouveau_mc *pmc)
50 struct nouveau_oclass *oclass, void *data, u32 size,
51 struct nouveau_object **pobject)
52{ 46{
53 struct nv50_mc_priv *priv; 47 struct nouveau_device *device = nv_device(pmc);
54 int ret; 48 pci_write_config_byte(device->pdev, 0x68, 0xff);
55
56 ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
57 *pobject = nv_object(priv);
58 if (ret)
59 return ret;
60
61 return 0;
62} 49}
63 50
64int 51int
65nv50_mc_init(struct nouveau_object *object) 52nv50_mc_init(struct nouveau_object *object)
66{ 53{
67 struct nv50_mc_priv *priv = (void *)object; 54 struct nv04_mc_priv *priv = (void *)object;
68 nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */ 55 nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
69 return nouveau_mc_init(&priv->base); 56 return nouveau_mc_init(&priv->base);
70} 57}
71 58
72struct nouveau_oclass 59struct nouveau_oclass *
73nv50_mc_oclass = { 60nv50_mc_oclass = &(struct nouveau_mc_oclass) {
74 .handle = NV_SUBDEV(MC, 0x50), 61 .base.handle = NV_SUBDEV(MC, 0x50),
75 .ofuncs = &(struct nouveau_ofuncs) { 62 .base.ofuncs = &(struct nouveau_ofuncs) {
76 .ctor = nv50_mc_ctor, 63 .ctor = nv04_mc_ctor,
77 .dtor = _nouveau_mc_dtor, 64 .dtor = _nouveau_mc_dtor,
78 .init = nv50_mc_init, 65 .init = nv50_mc_init,
79 .fini = _nouveau_mc_fini, 66 .fini = _nouveau_mc_fini,
80 }, 67 },
81}; 68 .intr = nv50_mc_intr,
69 .msi_rearm = nv50_mc_msi_rearm,
70}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c
new file mode 100644
index 000000000000..5f4541105e73
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv04.h"
26
27struct nouveau_oclass *
28nv94_mc_oclass = &(struct nouveau_mc_oclass) {
29 .base.handle = NV_SUBDEV(MC, 0x94),
30 .base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nv04_mc_ctor,
32 .dtor = _nouveau_mc_dtor,
33 .init = nv50_mc_init,
34 .fini = _nouveau_mc_fini,
35 },
36 .intr = nv50_mc_intr,
37 .msi_rearm = nv40_mc_msi_rearm,
38}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index 06710419a59b..f8a6f18e2d34 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -22,11 +22,7 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/mc.h> 25#include "nv04.h"
26
27struct nv98_mc_priv {
28 struct nouveau_mc base;
29};
30 26
31static const struct nouveau_mc_intr 27static const struct nouveau_mc_intr
32nv98_mc_intr[] = { 28nv98_mc_intr[] = {
@@ -36,6 +32,7 @@ nv98_mc_intr[] = {
36 { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */ 32 { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */
37 { 0x00008000, NVDEV_ENGINE_BSP }, 33 { 0x00008000, NVDEV_ENGINE_BSP },
38 { 0x00020000, NVDEV_ENGINE_VP }, 34 { 0x00020000, NVDEV_ENGINE_VP },
35 { 0x00040000, NVDEV_SUBDEV_PWR }, /* NVA3:NVC0 */
39 { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */ 36 { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */
40 { 0x00100000, NVDEV_SUBDEV_TIMER }, 37 { 0x00100000, NVDEV_SUBDEV_TIMER },
41 { 0x00200000, NVDEV_SUBDEV_GPIO }, 38 { 0x00200000, NVDEV_SUBDEV_GPIO },
@@ -47,29 +44,15 @@ nv98_mc_intr[] = {
47 {}, 44 {},
48}; 45};
49 46
50static int 47struct nouveau_oclass *
51nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 48nv98_mc_oclass = &(struct nouveau_mc_oclass) {
52 struct nouveau_oclass *oclass, void *data, u32 size, 49 .base.handle = NV_SUBDEV(MC, 0x98),
53 struct nouveau_object **pobject) 50 .base.ofuncs = &(struct nouveau_ofuncs) {
54{ 51 .ctor = nv04_mc_ctor,
55 struct nv98_mc_priv *priv;
56 int ret;
57
58 ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
59 *pobject = nv_object(priv);
60 if (ret)
61 return ret;
62
63 return 0;
64}
65
66struct nouveau_oclass
67nv98_mc_oclass = {
68 .handle = NV_SUBDEV(MC, 0x98),
69 .ofuncs = &(struct nouveau_ofuncs) {
70 .ctor = nv98_mc_ctor,
71 .dtor = _nouveau_mc_dtor, 52 .dtor = _nouveau_mc_dtor,
72 .init = nv50_mc_init, 53 .init = nv50_mc_init,
73 .fini = _nouveau_mc_fini, 54 .fini = _nouveau_mc_fini,
74 }, 55 },
75}; 56 .intr = nv98_mc_intr,
57 .msi_rearm = nv40_mc_msi_rearm,
58}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 104175c5a2dd..c02b4763a2d5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -22,13 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/mc.h> 25#include "nv04.h"
26 26
27struct nvc0_mc_priv { 27const struct nouveau_mc_intr
28 struct nouveau_mc base;
29};
30
31static const struct nouveau_mc_intr
32nvc0_mc_intr[] = { 28nvc0_mc_intr[] = {
33 { 0x00000001, NVDEV_ENGINE_PPP }, 29 { 0x00000001, NVDEV_ENGINE_PPP },
34 { 0x00000020, NVDEV_ENGINE_COPY0 }, 30 { 0x00000020, NVDEV_ENGINE_COPY0 },
@@ -41,6 +37,7 @@ nvc0_mc_intr[] = {
41 { 0x00020000, NVDEV_ENGINE_VP }, 37 { 0x00020000, NVDEV_ENGINE_VP },
42 { 0x00100000, NVDEV_SUBDEV_TIMER }, 38 { 0x00100000, NVDEV_SUBDEV_TIMER },
43 { 0x00200000, NVDEV_SUBDEV_GPIO }, 39 { 0x00200000, NVDEV_SUBDEV_GPIO },
40 { 0x01000000, NVDEV_SUBDEV_PWR },
44 { 0x02000000, NVDEV_SUBDEV_LTCG }, 41 { 0x02000000, NVDEV_SUBDEV_LTCG },
45 { 0x04000000, NVDEV_ENGINE_DISP }, 42 { 0x04000000, NVDEV_ENGINE_DISP },
46 { 0x10000000, NVDEV_SUBDEV_BUS }, 43 { 0x10000000, NVDEV_SUBDEV_BUS },
@@ -49,29 +46,22 @@ nvc0_mc_intr[] = {
49 {}, 46 {},
50}; 47};
51 48
52static int 49static void
53nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 50nvc0_mc_msi_rearm(struct nouveau_mc *pmc)
54 struct nouveau_oclass *oclass, void *data, u32 size,
55 struct nouveau_object **pobject)
56{ 51{
57 struct nvc0_mc_priv *priv; 52 struct nv04_mc_priv *priv = (void *)pmc;
58 int ret; 53 nv_wr32(priv, 0x088704, 0x00000000);
59
60 ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
61 *pobject = nv_object(priv);
62 if (ret)
63 return ret;
64
65 return 0;
66} 54}
67 55
68struct nouveau_oclass 56struct nouveau_oclass *
69nvc0_mc_oclass = { 57nvc0_mc_oclass = &(struct nouveau_mc_oclass) {
70 .handle = NV_SUBDEV(MC, 0xc0), 58 .base.handle = NV_SUBDEV(MC, 0xc0),
71 .ofuncs = &(struct nouveau_ofuncs) { 59 .base.ofuncs = &(struct nouveau_ofuncs) {
72 .ctor = nvc0_mc_ctor, 60 .ctor = nv04_mc_ctor,
73 .dtor = _nouveau_mc_dtor, 61 .dtor = _nouveau_mc_dtor,
74 .init = nv50_mc_init, 62 .init = nv50_mc_init,
75 .fini = _nouveau_mc_fini, 63 .fini = _nouveau_mc_fini,
76 }, 64 },
77}; 65 .intr = nvc0_mc_intr,
66 .msi_rearm = nvc0_mc_msi_rearm,
67}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
new file mode 100644
index 000000000000..837e545aeb9f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv04.h"
26
27struct nouveau_oclass *
28nvc3_mc_oclass = &(struct nouveau_mc_oclass) {
29 .base.handle = NV_SUBDEV(MC, 0xc3),
30 .base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nv04_mc_ctor,
32 .dtor = _nouveau_mc_dtor,
33 .init = nv50_mc_init,
34 .fini = _nouveau_mc_fini,
35 },
36 .intr = nvc0_mc_intr,
37 .msi_rearm = nv40_mc_msi_rearm,
38}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
new file mode 100644
index 000000000000..9908f1f05a00
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
@@ -0,0 +1,247 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/pwr.h>
26#include <subdev/timer.h>
27
28static int
29nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
30 u32 process, u32 message, u32 data0, u32 data1)
31{
32 struct nouveau_subdev *subdev = nv_subdev(ppwr);
33 u32 addr;
34
35 /* we currently only support a single process at a time waiting
36 * on a synchronous reply, take the PPWR mutex and tell the
37 * receive handler what we're waiting for
38 */
39 if (reply) {
40 mutex_lock(&subdev->mutex);
41 ppwr->recv.message = message;
42 ppwr->recv.process = process;
43 }
44
45 /* wait for a free slot in the fifo */
46 addr = nv_rd32(ppwr, 0x10a4a0);
47 if (!nv_wait_ne(ppwr, 0x10a4b0, 0xffffffff, addr ^ 8))
48 return -EBUSY;
49
50 /* acquire data segment access */
51 do {
52 nv_wr32(ppwr, 0x10a580, 0x00000001);
53 } while (nv_rd32(ppwr, 0x10a580) != 0x00000001);
54
55 /* write the packet */
56 nv_wr32(ppwr, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
57 ppwr->send.base));
58 nv_wr32(ppwr, 0x10a1c4, process);
59 nv_wr32(ppwr, 0x10a1c4, message);
60 nv_wr32(ppwr, 0x10a1c4, data0);
61 nv_wr32(ppwr, 0x10a1c4, data1);
62 nv_wr32(ppwr, 0x10a4a0, (addr + 1) & 0x0f);
63
64 /* release data segment access */
65 nv_wr32(ppwr, 0x10a580, 0x00000000);
66
67 /* wait for reply, if requested */
68 if (reply) {
69 wait_event(ppwr->recv.wait, (ppwr->recv.process == 0));
70 reply[0] = ppwr->recv.data[0];
71 reply[1] = ppwr->recv.data[1];
72 mutex_unlock(&subdev->mutex);
73 }
74
75 return 0;
76}
77
/* Workqueue handler: drain one message from the PWR->HOST fifo.
 * Scheduled from nouveau_pwr_intr() when the firmware raises the
 * message-pending interrupt (bit 0x00000040).
 */
static void
nouveau_pwr_recv(struct work_struct *work)
{
	struct nouveau_pwr *ppwr =
		container_of(work, struct nouveau_pwr, recv.work);
	u32 process, message, data0, data1;

	/* nothing to do if GET == PUT */
	u32 addr = nv_rd32(ppwr, 0x10a4cc);
	if (addr == nv_rd32(ppwr, 0x10a4c8))
		return;

	/* acquire data segment access (spin until the falcon grants it) */
	do {
		nv_wr32(ppwr, 0x10a580, 0x00000002);
	} while (nv_rd32(ppwr, 0x10a580) != 0x00000002);

	/* read the packet: select the queue slot in the data window,
	 * then pull the four message words, and advance GET (16-entry ring)
	 */
	nv_wr32(ppwr, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
				ppwr->recv.base));
	process = nv_rd32(ppwr, 0x10a1c4);
	message = nv_rd32(ppwr, 0x10a1c4);
	data0 = nv_rd32(ppwr, 0x10a1c4);
	data1 = nv_rd32(ppwr, 0x10a1c4);
	nv_wr32(ppwr, 0x10a4cc, (addr + 1) & 0x0f);

	/* release data segment access */
	nv_wr32(ppwr, 0x10a580, 0x00000000);

	/* wake process if it's waiting on a synchronous reply */
	if (ppwr->recv.process) {
		if (process == ppwr->recv.process &&
		    message == ppwr->recv.message) {
			ppwr->recv.data[0] = data0;
			ppwr->recv.data[1] = data1;
			/* clearing .process is the wait_event() condition
			 * in nouveau_pwr_send() */
			ppwr->recv.process = 0;
			wake_up(&ppwr->recv.wait);
			return;
		}
	}

	/* right now there's no other expected responses from the engine,
	 * so assume that any unexpected message is an error.
	 */
	nv_warn(ppwr, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
		(char)((process & 0x000000ff) >> 0),
		(char)((process & 0x0000ff00) >> 8),
		(char)((process & 0x00ff0000) >> 16),
		(char)((process & 0xff000000) >> 24),
		process, message, data0, data1);
}
129
/* PPWR interrupt handler.  Decodes the pending-and-enabled interrupt
 * bits, handles the ones we understand, and logs anything left over.
 */
static void
nouveau_pwr_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_pwr *ppwr = (void *)subdev;
	u32 disp = nv_rd32(ppwr, 0x10a01c);
	/* mask raw status by enabled bits; high half of 0x10a01c appears to
	 * be a routing/disable mask — TODO confirm against HW docs */
	u32 intr = nv_rd32(ppwr, 0x10a008) & disp & ~(disp >> 16);

	if (intr & 0x00000020) {
		/* UAS (user address space) fault */
		u32 stat = nv_rd32(ppwr, 0x10a16c);
		if (stat & 0x80000000) {
			nv_error(ppwr, "UAS fault at 0x%06x addr 0x%08x\n",
				 stat & 0x00ffffff, nv_rd32(ppwr, 0x10a168));
			nv_wr32(ppwr, 0x10a16c, 0x00000000);
			intr &= ~0x00000020;
		}
	}

	if (intr & 0x00000040) {
		/* firmware posted a message; drain it from process context */
		schedule_work(&ppwr->recv.work);
		nv_wr32(ppwr, 0x10a004, 0x00000040);
		intr &= ~0x00000040;
	}

	if (intr & 0x00000080) {
		/* debug: firmware echoes register writes via scratch regs */
		nv_info(ppwr, "wr32 0x%06x 0x%08x\n", nv_rd32(ppwr, 0x10a7a0),
			nv_rd32(ppwr, 0x10a7a4));
		nv_wr32(ppwr, 0x10a004, 0x00000080);
		intr &= ~0x00000080;
	}

	if (intr) {
		/* unknown bits: report and ack so we don't interrupt-storm */
		nv_error(ppwr, "intr 0x%08x\n", intr);
		nv_wr32(ppwr, 0x10a004, intr);
	}
}
165
/* Common PPWR suspend/teardown: mask the message/debug interrupts first,
 * then flush any in-flight recv work so it can't run against a dying
 * device, before the generic subdev fini.
 */
int
_nouveau_pwr_fini(struct nouveau_object *object, bool suspend)
{
	struct nouveau_pwr *ppwr = (void *)object;

	nv_wr32(ppwr, 0x10a014, 0x00000060);	/* INTR_EN_CLR: bits 5+6 */
	flush_work(&ppwr->recv.work);

	return nouveau_subdev_fini(&ppwr->base, suspend);
}
176
/* Common PPWR init: reset the falcon, upload the chipset's ucode data
 * and code segments, start it, and read back the fifo ring layout the
 * firmware publishes through the H2D/D2H scratch registers.
 */
int
_nouveau_pwr_init(struct nouveau_object *object)
{
	struct nouveau_pwr *ppwr = (void *)object;
	int ret, i;

	ret = nouveau_subdev_init(&ppwr->base);
	if (ret)
		return ret;

	nv_subdev(ppwr)->intr = nouveau_pwr_intr;
	ppwr->message = nouveau_pwr_send;

	/* prevent previous ucode from running, wait for idle, reset */
	nv_wr32(ppwr, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
	nv_wait(ppwr, 0x10a04c, 0xffffffff, 0x00000000);
	nv_mask(ppwr, 0x000200, 0x00002000, 0x00000000);
	nv_mask(ppwr, 0x000200, 0x00002000, 0x00002000);

	/* upload data segment */
	nv_wr32(ppwr, 0x10a1c0, 0x01000000);	/* auto-increment window */
	for (i = 0; i < ppwr->data.size / 4; i++)
		nv_wr32(ppwr, 0x10a1c4, ppwr->data.data[i]);

	/* upload code segment, selecting a new 256-byte page every
	 * 64 words */
	nv_wr32(ppwr, 0x10a180, 0x01000000);
	for (i = 0; i < ppwr->code.size / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(ppwr, 0x10a188, i >> 6);
		nv_wr32(ppwr, 0x10a184, ppwr->code.data[i]);
	}

	/* start it running */
	nv_wr32(ppwr, 0x10a10c, 0x00000000);
	nv_wr32(ppwr, 0x10a104, 0x00000000);	/* entry point */
	nv_wr32(ppwr, 0x10a100, 0x00000002);	/* CPUCTL: start */

	/* wait for valid host->pwr ring configuration */
	if (!nv_wait_ne(ppwr, 0x10a4d0, 0xffffffff, 0x00000000))
		return -EBUSY;
	ppwr->send.base = nv_rd32(ppwr, 0x10a4d0) & 0x0000ffff;
	ppwr->send.size = nv_rd32(ppwr, 0x10a4d0) >> 16;

	/* wait for valid pwr->host ring configuration */
	if (!nv_wait_ne(ppwr, 0x10a4dc, 0xffffffff, 0x00000000))
		return -EBUSY;
	ppwr->recv.base = nv_rd32(ppwr, 0x10a4dc) & 0x0000ffff;
	ppwr->recv.size = nv_rd32(ppwr, 0x10a4dc) >> 16;

	nv_wr32(ppwr, 0x10a010, 0x000000e0);	/* INTR_EN_SET */
	return 0;
}
229
/* Base constructor used by the per-chipset PPWR classes.  Follows the
 * standard nouveau create_ convention: *pobject is assigned before the
 * error check so the caller's dtor path can clean up a partial object.
 */
int
nouveau_pwr_create_(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, int length, void **pobject)
{
	struct nouveau_pwr *ppwr;
	int ret;

	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PPWR",
				     "pwr", length, pobject);
	ppwr = *pobject;
	if (ret)
		return ret;

	/* recv work + waitqueue back the synchronous message path in
	 * nouveau_pwr_send()/nouveau_pwr_recv() */
	INIT_WORK(&ppwr->recv.work, nouveau_pwr_recv);
	init_waitqueue_head(&ppwr->recv.wait);
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
new file mode 100644
index 000000000000..2284ecb1c9b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
@@ -0,0 +1,151 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifdef INCLUDE_PROC
26process(PROC_HOST, #host_init, #host_recv)
27#endif
28
29/******************************************************************************
30 * HOST data segment
31 *****************************************************************************/
32#ifdef INCLUDE_DATA
33// HOST (R)FIFO packet format
34.equ #fifo_process 0x00
35.equ #fifo_message 0x04
36.equ #fifo_data0 0x08
37.equ #fifo_data1 0x0c
38
39// HOST HOST->PWR queue description
40.equ #fifo_qlen 4 // log2(size of queue entry in bytes)
41.equ #fifo_qnum 3 // log2(max number of entries in queue)
42.equ #fifo_qmaskb (1 << #fifo_qnum) // max number of entries in queue
43.equ #fifo_qmaskp (#fifo_qmaskb - 1)
44.equ #fifo_qmaskf ((#fifo_qmaskb << 1) - 1)
45.equ #fifo_qsize (1 << (#fifo_qlen + #fifo_qnum))
46fifo_queue: .skip 128 // #fifo_qsize
47
48// HOST PWR->HOST queue description
49.equ #rfifo_qlen 4 // log2(size of queue entry in bytes)
50.equ #rfifo_qnum 3 // log2(max number of entries in queue)
51.equ #rfifo_qmaskb (1 << #rfifo_qnum) // max number of entries in queue
52.equ #rfifo_qmaskp (#rfifo_qmaskb - 1)
53.equ #rfifo_qmaskf ((#rfifo_qmaskb << 1) - 1)
54.equ #rfifo_qsize (1 << (#rfifo_qlen + #rfifo_qnum))
55rfifo_queue: .skip 128 // #rfifo_qsize
56#endif
57
58/******************************************************************************
59 * HOST code segment
60 *****************************************************************************/
61#ifdef INCLUDE_CODE
// HOST->PWR comms - dequeue message(s) for process(es) from FIFO
//
// Drains every pending entry from the host-visible FIFO(0) ring and
// dispatches each to its target process via send().
//
// $r15 - current (host)
// $r0 - zero
host_send:
	// empty when GET == PUT
	nv_iord($r1, NV_PPWR_FIFO_GET(0))
	nv_iord($r2, NV_PPWR_FIFO_PUT(0))
	cmp b32 $r1 $r2
	bra e #host_send_done
	// calculate address of message
	and $r14 $r1 #fifo_qmaskp
	shl b32 $r14 $r14 #fifo_qlen
	add b32 $r14 #fifo_queue

	// read message data, and pass to appropriate process
	ld b32 $r11 D[$r14 + #fifo_data1]
	ld b32 $r12 D[$r14 + #fifo_data0]
	ld b32 $r13 D[$r14 + #fifo_message]
	ld b32 $r14 D[$r14 + #fifo_process]
	call(send)

	// increment GET
	// NOTE(review): the masked value lands in $r14 but the *unmasked*
	// $r1 is written back to the GET register — looks like the write
	// should use the wrapped value; confirm against the host-side
	// (addr + 1) & 0x0f handling in pwr/base.c
	add b32 $r1 0x1
	and $r14 $r1 #fifo_qmaskf
	nv_iowr(NV_PPWR_FIFO_GET(0), $r1)
	bra #host_send
	host_send_done:
	ret
90
// PWR->HOST comms - enqueue message for HOST to RFIFO
//
// Also doubles as the HOST process' message handler: a message from
// PROC_KERN means "host FIFO data pending" and redirects to host_send.
//
// $r15 - current (host)
// $r14 - process
// $r13 - message
// $r12 - message data 0
// $r11 - message data 1
// $r0 - zero
host_recv:
	// message from intr handler == HOST->PWR comms pending
	mov $r1 (PROC_KERN & 0x0000ffff)
	sethi $r1 (PROC_KERN & 0xffff0000)
	cmp b32 $r14 $r1
	bra e #host_send

	// wait for space in RFIFO (full when GET ^ qmaskb == PUT)
	host_recv_wait:
	nv_iord($r1, NV_PPWR_RFIFO_GET)
	nv_iord($r2, NV_PPWR_RFIFO_PUT)
	xor $r1 #rfifo_qmaskb
	cmp b32 $r1 $r2
	bra e #host_recv_wait

	// compute slot address for PUT
	and $r3 $r2 #rfifo_qmaskp
	shl b32 $r3 #rfifo_qlen
	add b32 $r3 #rfifo_queue

	// enqueue message
	st b32 D[$r3 + #fifo_data1] $r11
	st b32 D[$r3 + #fifo_data0] $r12
	st b32 D[$r3 + #fifo_message] $r13
	st b32 D[$r3 + #fifo_process] $r14

	// advance PUT (wrap over double-length range for full/empty test)
	add b32 $r2 0x1
	and $r2 #rfifo_qmaskf
	nv_iowr(NV_PPWR_RFIFO_PUT, $r2)

	// notify host of pending message
	mov $r2 NV_PPWR_INTR_TRIGGER_USER0
	nv_iowr(NV_PPWR_INTR_TRIGGER, $r2)
	ret
132
// HOST process init: publish both queue layouts to the host driver and
// enable the fifo sub-interrupt.
//
// $r15 - current (host)
// $r0 - zero
host_init:
	// store each fifo's base/size in H2D/D2H scratch regs
	// (size in the high 16 bits, dseg offset in the low 16 —
	// matches the base/size split read back by _nouveau_pwr_init)
	mov $r1 #fifo_qsize
	shl b32 $r1 16
	or $r1 #fifo_queue
	nv_iowr(NV_PPWR_H2D, $r1);

	mov $r1 #rfifo_qsize
	shl b32 $r1 16
	or $r1 #rfifo_queue
	nv_iowr(NV_PPWR_D2H, $r1);

	// enable fifo subintr for first fifo
	mov $r1 1
	nv_iowr(NV_PPWR_FIFO_INTR_EN, $r1)
	ret
151#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc
new file mode 100644
index 000000000000..98f1c3738b42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc
@@ -0,0 +1,84 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifdef INCLUDE_PROC
26process(PROC_IDLE, #idle, #idle_recv)
27#endif
28
29/******************************************************************************
30 * IDLE data segment
31 *****************************************************************************/
32#ifdef INCLUDE_DATA
33#endif
34
35/******************************************************************************
36 * IDLE code segment
37 *****************************************************************************/
38#ifdef INCLUDE_CODE
// IDLE message handler — the idle process expects no messages, so any
// that arrive are silently dropped.
//
// $r15 - current (idle)
// $r14 - message
// $r0 - zero
idle_recv:
	ret
46
// IDLE process body — the scheduler.  Loops over every process,
// delivering queued messages, and sleeps once a full pass completes
// with no work and no interrupt having fired ($p0 still set).
//
// $r15 - current (idle)
// $r0 - zero
idle:
	// set our "no interrupt has occurred during our execution" flag
	// (the interrupt handler clears $p0 via its bclr before iret)
	bset $flags $p0

	// count IDLE invocations for debugging purposes
	nv_iord($r1, NV_PPWR_DSCRATCH(1))
	add b32 $r1 1
	nv_iowr(NV_PPWR_DSCRATCH(1), $r1)

	// keep looping while there's pending messages for any process
	// ($p2 records "some message was handled this pass")
	idle_loop:
	mov $r1 #proc_list_head
	bclr $flags $p2
	idle_proc:
	// process the process' messages until there's none left
	// (recv sets $p1 when it handled a message)
	idle_proc_exec:
	push $r1
	mov b32 $r14 $r1
	call(recv)
	pop $r1
	bra not $p1 #idle_proc_next
	bset $flags $p2
	bra #idle_proc_exec
	// next process!  ($r15 points at our own entry, which ends the list)
	idle_proc_next:
	add b32 $r1 #proc_size
	cmp b32 $r1 $r15
	bra ne #idle_proc
	bra $p2 #idle_loop

	// sleep if no interrupts have occurred
	sleep $p0
	bra #idle
84#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
new file mode 100644
index 000000000000..0a7b05fa5c11
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
@@ -0,0 +1,452 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25/******************************************************************************
26 * kernel data segment
27 *****************************************************************************/
28#ifdef INCLUDE_PROC
29proc_kern:
30process(PROC_KERN, 0, 0)
31proc_list_head:
32#endif
33
34#ifdef INCLUDE_DATA
35proc_list_tail:
36time_prev: .b32 0
37time_next: .b32 0
38#endif
39
40/******************************************************************************
41 * kernel code segment
42 *****************************************************************************/
43#ifdef INCLUDE_CODE
44 bra #init
45
// read nv register - issue an MMIO read through the PPWR MMIO bridge
// and spin until the transaction completes
//
// $r15 - current
// $r14 - addr
// $r13 - data (return)
// $r0 - zero
rd32:
	nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
	mov $r14 NV_PPWR_MMIO_CTRL_OP_RD
	sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
	nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
	// busy-wait for the STATUS field to clear
	rd32_wait:
	nv_iord($r14, NV_PPWR_MMIO_CTRL)
	and $r14 NV_PPWR_MMIO_CTRL_STATUS
	bra nz #rd32_wait
	nv_iord($r13, NV_PPWR_MMIO_DATA)
	ret
63
// write nv register - issue a full-word MMIO write through the PPWR
// MMIO bridge and spin until it completes
//
// $r15 - current
// $r14 - addr
// $r13 - data
// $r0 - zero
wr32:
	nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
	nv_iowr(NV_PPWR_MMIO_DATA, $r13)
	mov $r14 NV_PPWR_MMIO_CTRL_OP_WR
	or $r14 NV_PPWR_MMIO_CTRL_MASK_B32_0
	sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER

#ifdef NVKM_FALCON_MMIO_TRAP
	// debug build: notify the host of the pending write (USER1) and
	// wait for it to be acknowledged before triggering
	mov $r8 NV_PPWR_INTR_TRIGGER_USER1
	nv_iowr(NV_PPWR_INTR_TRIGGER, $r8)
	wr32_host:
	nv_iord($r8, NV_PPWR_INTR)
	and $r8 NV_PPWR_INTR_USER1
	bra nz #wr32_host
#endif

	nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
	wr32_wait:
	nv_iord($r14, NV_PPWR_MMIO_CTRL)
	and $r14 NV_PPWR_MMIO_CTRL_STATUS
	bra nz #wr32_wait
	ret
92
// busy-wait for a period of time, measured against the free-running
// PPWR timer (elapsed = now - start, unsigned wrap-safe subtract)
//
// $r15 - current
// $r14 - ns
// $r0 - zero
nsec:
	nv_iord($r8, NV_PPWR_TIMER_LOW)
	nsec_loop:
	nv_iord($r9, NV_PPWR_TIMER_LOW)
	sub b32 $r9 $r8
	cmp b32 $r9 $r14
	bra l #nsec_loop
	ret
106
// busy-wait until (mmio[addr] & mask) == data, or until the timeout
// elapses — whichever comes first; no success/failure indication is
// returned to the caller
//
// $r15 - current
// $r14 - addr
// $r13 - mask
// $r12 - data
// $r11 - timeout (ns)
// $r0 - zero
wait:
	nv_iord($r8, NV_PPWR_TIMER_LOW)
	wait_loop:
	nv_rd32($r10, $r14)
	and $r10 $r13
	cmp b32 $r10 $r12
	bra e #wait_done
	nv_iord($r9, NV_PPWR_TIMER_LOW)
	sub b32 $r9 $r8
	cmp b32 $r9 $r11
	bra l #wait_loop
	wait_done:
	ret
128
// watchdog tick: walk the process table, expire any per-process timers
// (delivering KMSG_ALARM), and compute the next soonest deadline into
// #time_next.  Tail-recurses over each process entry.
//
// $r15 - current (kern)
// $r14 - process
// $r8 - NV_PPWR_INTR
intr_watchdog:
	// read process' timer status, skip if not enabled
	ld b32 $r9 D[$r14 + #proc_time]
	cmp b32 $r9 0
	bra z #intr_watchdog_next_proc

	// subtract last timer's value from process' timer,
	// if it's <= 0 then the timer has expired
	ld b32 $r10 D[$r0 + #time_prev]
	sub b32 $r9 $r10
	bra g #intr_watchdog_next_time
	mov $r13 KMSG_ALARM
	call(send_proc)
	clear b32 $r9
	bra #intr_watchdog_next_proc

	// otherwise, update the next timer's value if this
	// process' timer is the soonest
	intr_watchdog_next_time:
	// ... or if there's no next timer yet
	ld b32 $r10 D[$r0 + #time_next]
	cmp b32 $r10 0
	bra z #intr_watchdog_next_time_set

	cmp b32 $r9 $r10
	bra g #intr_watchdog_next_proc
	intr_watchdog_next_time_set:
	st b32 D[$r0 + #time_next] $r9

	// update process' timer status, and advance
	intr_watchdog_next_proc:
	st b32 D[$r14 + #proc_time] $r9
	add b32 $r14 #proc_size
	cmp b32 $r14 #proc_list_tail
	bra ne #intr_watchdog
	ret
168
// top-level interrupt service routine.  Saves all GPRs + $flags,
// handles watchdog and fifo sub-interrupts, acks everything, restores
// state and clears $p0 so the idle loop knows an interrupt occurred.
intr:
	push $r0
	clear b32 $r0
	push $r8
	push $r9
	push $r10
	push $r11
	push $r12
	push $r13
	push $r14
	push $r15
	// run message sends on behalf of the kernel process
	mov $r15 #proc_kern
	mov $r8 $flags
	push $r8

	// count interrupts for debugging purposes
	nv_iord($r8, NV_PPWR_DSCRATCH(0))
	add b32 $r8 1
	nv_iowr(NV_PPWR_DSCRATCH(0), $r8)

	nv_iord($r8, NV_PPWR_INTR)
	and $r9 $r8 NV_PPWR_INTR_WATCHDOG
	bra z #intr_skip_watchdog
	// expire timers, then re-arm the watchdog for the next soonest
	// deadline (if any remain)
	st b32 D[$r0 + #time_next] $r0
	mov $r14 #proc_list_head
	call(intr_watchdog)
	ld b32 $r9 D[$r0 + #time_next]
	cmp b32 $r9 0
	bra z #intr_skip_watchdog
	nv_iowr(NV_PPWR_WATCHDOG_TIME, $r9)
	st b32 D[$r0 + #time_prev] $r9

	intr_skip_watchdog:
	and $r9 $r8 NV_PPWR_INTR_SUBINTR
	bra z #intr_skip_subintr
	nv_iord($r9, NV_PPWR_SUBINTR)
	and $r10 $r9 NV_PPWR_SUBINTR_FIFO
	bra z #intr_subintr_skip_fifo
	// forward fifo status to the HOST process as KMSG_FIFO
	nv_iord($r12, NV_PPWR_FIFO_INTR)
	push $r12
	mov $r14 (PROC_HOST & 0x0000ffff)
	sethi $r14 (PROC_HOST & 0xffff0000)
	mov $r13 KMSG_FIFO
	call(send)
	pop $r12
	nv_iowr(NV_PPWR_FIFO_INTR, $r12)
	intr_subintr_skip_fifo:
	nv_iowr(NV_PPWR_SUBINTR, $r9)

	intr_skip_subintr:
	and $r9 $r8 NV_PPWR_INTR_PAUSE
	bra z #intr_skip_pause
	// NOTE(review): masks $r10, which holds no meaningful value here —
	// 0xffbf looks intended to strip a bit from the ack mask in $r8;
	// verify against envytools falcon docs / later revisions
	and $r10 0xffbf

	intr_skip_pause:
	and $r9 $r8 NV_PPWR_INTR_USER0
	bra z #intr_skip_user0
	// NOTE(review): same suspicious no-op masking as above
	and $r10 0xffbf

	intr_skip_user0:
	nv_iowr(NV_PPWR_INTR_ACK, $r8)
	pop $r8
	mov $flags $r8
	pop $r15
	pop $r14
	pop $r13
	pop $r12
	pop $r11
	pop $r10
	pop $r9
	pop $r8
	pop $r0
	// tell the idle loop an interrupt happened (cancels its sleep)
	bclr $flags $p0
	iret
242
// request the current process be sent a message after a timeout expires
//
// A process may have at most one pending timer; requests while one is
// armed are silently ignored.
//
// $r15 - current
// $r14 - ticks
// $r0 - zero
timer:
	// interrupts off to prevent racing with timer isr
	bclr $flags ie0

	// if current process already has a timer set, bail
	ld b32 $r8 D[$r15 + #proc_time]
	cmp b32 $r8 0
	bra g #timer_done
	st b32 D[$r15 + #proc_time] $r14

	// halt watchdog timer temporarily ($r8 is known zero here) and
	// check for a pending interrupt.  if there's one already pending,
	// we can just bail since the timer isr will queue the next
	// soonest right after it's done
	nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r8)
	nv_iord($r8, NV_PPWR_INTR)
	and $r8 NV_PPWR_INTR_WATCHDOG
	bra nz #timer_enable

	// update the watchdog if this timer should expire first,
	// or if there's no timeout already set
	nv_iord($r8, NV_PPWR_WATCHDOG_TIME)
	cmp b32 $r14 $r0
	bra e #timer_reset
	cmp b32 $r14 $r8
	bra l #timer_done
	timer_reset:
	nv_iowr(NV_PPWR_WATCHDOG_TIME, $r14)
	st b32 D[$r0 + #time_prev] $r14

	// re-enable the watchdog timer
	timer_enable:
	mov $r8 1
	nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r8)

	// interrupts back on
	timer_done:
	bset $flags ie0
	ret
287
// send message to another process (by table entry pointer)
//
// Drops the message silently if the target's queue is full; on success
// sets $p2 so the idle loop keeps scheduling.
//
// $r15 - current
// $r14 - process
// $r13 - message
// $r12 - message data 0
// $r11 - message data 1
// $r0 - zero
send_proc:
	push $r8
	push $r9
	// check for space in queue (full when GET ^ qmaskb == PUT)
	ld b32 $r8 D[$r14 + #proc_qget]
	ld b32 $r9 D[$r14 + #proc_qput]
	xor $r8 #proc_qmaskb
	cmp b32 $r8 $r9
	bra e #send_done

	// enqueue message at slot (PUT & qmaskp) in the target's queue
	and $r8 $r9 #proc_qmaskp
	shl b32 $r8 $r8 #proc_qlen
	add b32 $r8 #proc_queue
	add b32 $r8 $r14

	// sender id is recorded so recv() can report who sent it
	ld b32 $r10 D[$r15 + #proc_id]
	st b32 D[$r8 + #msg_process] $r10
	st b32 D[$r8 + #msg_message] $r13
	st b32 D[$r8 + #msg_data0] $r12
	st b32 D[$r8 + #msg_data1] $r11

	// increment PUT (wraps over double range for the full/empty test)
	add b32 $r9 1
	and $r9 #proc_qmaskf
	st b32 D[$r14 + #proc_qput] $r9
	bset $flags $p2
	send_done:
	pop $r9
	pop $r8
	ret
327
// lookup process structure by its name (id)
//
// Linear scan of the process table between #proc_list_head and
// #proc_list_tail.  On failure $r14 is left pointing at the table
// tail and $p1 is cleared.
//
// $r15 - current
// $r14 - process name
// $r0 - zero
//
// $r14 - process
// $p1 - success
find:
	push $r8
	mov $r8 #proc_list_head
	bset $flags $p1
	find_loop:
	ld b32 $r10 D[$r8 + #proc_id]
	cmp b32 $r10 $r14
	bra e #find_done
	add b32 $r8 #proc_size
	cmp b32 $r8 #proc_list_tail
	bra ne #find_loop
	bclr $flags $p1
	find_done:
	mov b32 $r14 $r8
	pop $r8
	ret
352
// send message to another process, addressed by process id rather than
// table pointer.  Silently does nothing for unknown ids.
//
// $r15 - current
// $r14 - process id
// $r13 - message
// $r12 - message data 0
// $r11 - message data 1
// $r0 - zero
send:
	call(find)
	// tail-call into send_proc with the resolved entry in $r14
	bra $p1 #send_proc
	ret
365
// process single message for a given process
//
// Dequeues one message (if any) and invokes the process' recv handler
// with the standard message-register convention.  $p1 is set iff a
// message was delivered.
//
// $r15 - current
// $r14 - process
// $r0 - zero
recv:
	ld b32 $r8 D[$r14 + #proc_qget]
	ld b32 $r9 D[$r14 + #proc_qput]
	bclr $flags $p1
	cmp b32 $r8 $r9
	bra e #recv_done
	// dequeue message: remember slot index in $r9, advance GET
	and $r9 $r8 #proc_qmaskp
	add b32 $r8 1
	and $r8 #proc_qmaskf
	st b32 D[$r14 + #proc_qget] $r8
	ld b32 $r10 D[$r14 + #proc_recv]

	// save caller context and make the target process "current"
	push $r15
	mov $r15 $flags
	push $r15
	mov b32 $r15 $r14

	// load the four message words into the handler registers
	shl b32 $r9 $r9 #proc_qlen
	add b32 $r14 $r9
	add b32 $r14 #proc_queue
	ld b32 $r11 D[$r14 + #msg_data1]
	ld b32 $r12 D[$r14 + #msg_data0]
	ld b32 $r13 D[$r14 + #msg_message]
	ld b32 $r14 D[$r14 + #msg_process]

	// process it
	call $r10
	pop $r15
	mov $flags $r15
	bset $flags $p1
	pop $r15
	recv_done:
	ret
405
// kernel entry point: set up the stack, optionally enable the UAS MMIO
// window, configure interrupt routing/enables, start the watchdog, and
// bootstrap every process' init function.  Never returns (the idle
// process loops forever).
init:
	// setup stack (top derived from the memory size in CAPS)
	nv_iord($r1, NV_PPWR_CAPS)
	extr $r1 $r1 9:17
	shl b32 $r1 8
	mov $sp $r1

#ifdef NVKM_FALCON_MMIO_UAS
	// somehow allows the magic "access mmio via D[]" stuff that's
	// used by the nv_rd32/nv_wr32 macros to work
	mov $r1 0x0010
	sethi $r1 NV_PPWR_UAS_CONFIG_ENABLE
	nv_iowrs(NV_PPWR_UAS_CONFIG, $r1)
#endif

	// route all interrupts except user0/1 and pause to fuc
	mov $r1 0x00e0
	sethi $r1 0x00000000
	nv_iowr(NV_PPWR_INTR_ROUTE, $r1)

	// enable watchdog and subintr intrs (clear everything first)
	mov $r1 NV_PPWR_INTR_EN_CLR_MASK
	nv_iowr(NV_PPWR_INTR_EN_CLR, $r1)
	mov $r1 NV_PPWR_INTR_EN_SET_WATCHDOG
	or $r1 NV_PPWR_INTR_EN_SET_SUBINTR
	nv_iowr(NV_PPWR_INTR_EN_SET, $r1)

	// enable interrupts globally, with #intr as the vector
	mov $r1 #intr
	sethi $r1 0x00000000
	mov $iv0 $r1
	bset $flags ie0

	// enable watchdog timer
	mov $r1 1
	nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r1)

	// bootstrap processes, idle process will be last, and not return
	mov $r15 #proc_list_head
	init_proc:
	ld b32 $r1 D[$r15 + #proc_init]
	cmp b32 $r1 0
	// NOTE(review): a zero proc_init would spin here forever without
	// advancing $r15 — presumably all listed processes have an init
	// and idle never returns, so this branch is dead; confirm
	bra z #init_proc
	call $r1
	add b32 $r15 #proc_size
	bra #init_proc
452#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
new file mode 100644
index 000000000000..2a74ea907604
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
@@ -0,0 +1,199 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#define GT215 0xa3
26#define GF100 0xc0
27#define GF119 0xd9
28#define GK208 0x108
29
30#include "os.h"
31
32// IO addresses
33#define NV_PPWR_INTR_TRIGGER 0x0000
34#define NV_PPWR_INTR_TRIGGER_USER1 0x00000080
35#define NV_PPWR_INTR_TRIGGER_USER0 0x00000040
36#define NV_PPWR_INTR_ACK 0x0004
37#define NV_PPWR_INTR_ACK_SUBINTR 0x00000800
38#define NV_PPWR_INTR_ACK_WATCHDOG 0x00000002
39#define NV_PPWR_INTR 0x0008
40#define NV_PPWR_INTR_SUBINTR 0x00000800
41#define NV_PPWR_INTR_USER1 0x00000080
42#define NV_PPWR_INTR_USER0 0x00000040
43#define NV_PPWR_INTR_PAUSE 0x00000020
44#define NV_PPWR_INTR_WATCHDOG 0x00000002
45#define NV_PPWR_INTR_EN_SET 0x0010
46#define NV_PPWR_INTR_EN_SET_SUBINTR 0x00000800
47#define NV_PPWR_INTR_EN_SET_WATCHDOG 0x00000002
48#define NV_PPWR_INTR_EN_CLR 0x0014
49#define NV_PPWR_INTR_EN_CLR_MASK /* fuck i hate envyas */ -1
50#define NV_PPWR_INTR_ROUTE 0x001c
51#define NV_PPWR_TIMER_LOW 0x002c
52#define NV_PPWR_WATCHDOG_TIME 0x0034
53#define NV_PPWR_WATCHDOG_ENABLE 0x0038
54#define NV_PPWR_CAPS 0x0108
55#define NV_PPWR_UAS_CONFIG 0x0164
56#define NV_PPWR_UAS_CONFIG_ENABLE 0x00010000
57#if NVKM_PPWR_CHIPSET >= GK208
58#define NV_PPWR_DSCRATCH(i) (4 * (i) + 0x0450)
59#endif
60#define NV_PPWR_FIFO_PUT(i) (4 * (i) + 0x04a0)
61#define NV_PPWR_FIFO_GET(i) (4 * (i) + 0x04b0)
62#define NV_PPWR_FIFO_INTR 0x04c0
63#define NV_PPWR_FIFO_INTR_EN 0x04c4
64#define NV_PPWR_RFIFO_PUT 0x04c8
65#define NV_PPWR_RFIFO_GET 0x04cc
66#define NV_PPWR_H2D 0x04d0
67#define NV_PPWR_D2H 0x04dc
68#if NVKM_PPWR_CHIPSET < GK208
69#define NV_PPWR_DSCRATCH(i) (4 * (i) + 0x05d0)
70#endif
71#define NV_PPWR_SUBINTR 0x0688
72#define NV_PPWR_SUBINTR_FIFO 0x00000002
73#define NV_PPWR_MMIO_ADDR 0x07a0
74#define NV_PPWR_MMIO_DATA 0x07a4
75#define NV_PPWR_MMIO_CTRL 0x07ac
76#define NV_PPWR_MMIO_CTRL_TRIGGER 0x00010000
77#define NV_PPWR_MMIO_CTRL_STATUS 0x00007000
78#define NV_PPWR_MMIO_CTRL_STATUS_IDLE 0x00000000
79#define NV_PPWR_MMIO_CTRL_MASK 0x000000f0
80#define NV_PPWR_MMIO_CTRL_MASK_B32_0 0x000000f0
81#define NV_PPWR_MMIO_CTRL_OP 0x00000003
82#define NV_PPWR_MMIO_CTRL_OP_RD 0x00000001
83#define NV_PPWR_MMIO_CTRL_OP_WR 0x00000002
84#define NV_PPWR_OUTPUT 0x07c0
85#define NV_PPWR_OUTPUT_FB_PAUSE 0x00000004
86#define NV_PPWR_OUTPUT_SET 0x07e0
87#define NV_PPWR_OUTPUT_SET_FB_PAUSE 0x00000004
88#define NV_PPWR_OUTPUT_CLR 0x07e4
89#define NV_PPWR_OUTPUT_CLR_FB_PAUSE 0x00000004
90
91// Inter-process message format
92.equ #msg_process 0x00 /* send() target, recv() sender */
93.equ #msg_message 0x04
94.equ #msg_data0 0x08
95.equ #msg_data1 0x0c
96
97// Kernel message IDs
98#define KMSG_FIFO 0x00000000
99#define KMSG_ALARM 0x00000001
100
101// Process message queue description
102.equ #proc_qlen 4 // log2(size of queue entry in bytes)
103.equ #proc_qnum 2 // log2(max number of entries in queue)
104.equ #proc_qmaskb (1 << #proc_qnum) // max number of entries in queue
105.equ #proc_qmaskp (#proc_qmaskb - 1)
106.equ #proc_qmaskf ((#proc_qmaskb << 1) - 1)
107.equ #proc_qsize (1 << (#proc_qlen + #proc_qnum))
108
109// Process table entry
110.equ #proc_id 0x00
111.equ #proc_init 0x04
112.equ #proc_recv 0x08
113.equ #proc_time 0x0c
114.equ #proc_qput 0x10
115.equ #proc_qget 0x14
116.equ #proc_queue 0x18
117.equ #proc_size (0x18 + #proc_qsize)
118
119#define process(id,init,recv) /*
120*/ .b32 id /*
121*/ .b32 init /*
122*/ .b32 recv /*
123*/ .b32 0 /*
124*/ .b32 0 /*
125*/ .b32 0 /*
126*/ .skip 64
127
128#ifndef NVKM_FALCON_UNSHIFTED_IO
129#define nv_iord(reg,ior) /*
130*/ mov reg ior /*
131*/ shl b32 reg 6 /*
132*/ iord reg I[reg + 0x000]
133#else
134#define nv_iord(reg,ior) /*
135*/ mov reg ior /*
136*/ iord reg I[reg + 0x000]
137#endif
138
139#ifndef NVKM_FALCON_UNSHIFTED_IO
140#define nv_iowr(ior,reg) /*
141*/ mov $r0 ior /*
142*/ shl b32 $r0 6 /*
143*/ iowr I[$r0 + 0x000] reg /*
144*/ clear b32 $r0
145#else
146#define nv_iowr(ior,reg) /*
147*/ mov $r0 ior /*
148*/ iowr I[$r0 + 0x000] reg /*
149*/ clear b32 $r0
150#endif
151
152#ifndef NVKM_FALCON_UNSHIFTED_IO
153#define nv_iowrs(ior,reg) /*
154*/ mov $r0 ior /*
155*/ shl b32 $r0 6 /*
156*/ iowrs I[$r0 + 0x000] reg /*
157*/ clear b32 $r0
158#else
159#define nv_iowrs(ior,reg) /*
160*/ mov $r0 ior /*
161*/ iowrs I[$r0 + 0x000] reg /*
162*/ clear b32 $r0
163#endif
164
165#define hash #
166#define fn(a) a
167#ifndef NVKM_FALCON_PC24
168#define call(a) call fn(hash)a
169#else
170#define call(a) lcall fn(hash)a
171#endif
172
173#ifndef NVKM_FALCON_MMIO_UAS
174#define nv_rd32(reg,addr) /*
175*/ mov b32 $r14 addr /*
176*/ call(rd32) /*
177*/ mov b32 reg $r13
178#else
179#define nv_rd32(reg,addr) /*
180*/ sethi $r0 0x14000000 /*
181*/ or $r0 addr /*
182*/ ld b32 reg D[$r0] /*
183*/ clear b32 $r0
184#endif
185
186#if !defined(NVKM_FALCON_MMIO_UAS) || defined(NVKM_FALCON_MMIO_TRAP)
187#define nv_wr32(addr,reg) /*
188*/ push addr /*
189*/ push reg /*
190*/ pop $r13 /*
191*/ pop $r14 /*
192*/ call(wr32) /*
193#else
194#define nv_wr32(addr,reg) /*
195*/ sethi $r0 0x14000000 /*
196*/ or $r0 addr /*
197*/ st b32 D[$r0] reg /*
198*/ clear b32 $r0
199#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
new file mode 100644
index 000000000000..d43741eccb11
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
@@ -0,0 +1,219 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifdef INCLUDE_PROC
26process(PROC_MEMX, #memx_init, #memx_recv)
27#endif
28
29/******************************************************************************
30 * MEMX data segment
31 *****************************************************************************/
32#ifdef INCLUDE_DATA
33.equ #memx_opcode 0
34.equ #memx_header 2
35.equ #memx_length 4
36.equ #memx_func 8
37
38#define handler(cmd,hdr,len,func) /*
39*/ .b16 MEMX_##cmd /*
40*/ .b16 hdr /*
41*/ .b16 len /*
42*/ .b16 0 /*
43*/ .b32 func
44
45memx_func_head:
46handler(ENTER , 0x0001, 0x0000, #memx_func_enter)
47memx_func_next:
48handler(LEAVE , 0x0000, 0x0000, #memx_func_leave)
49handler(WR32 , 0x0000, 0x0002, #memx_func_wr32)
50handler(WAIT , 0x0004, 0x0000, #memx_func_wait)
51handler(DELAY , 0x0001, 0x0000, #memx_func_delay)
52memx_func_tail:
53
54.equ #memx_func_size #memx_func_next - #memx_func_head
55.equ #memx_func_num (#memx_func_tail - #memx_func_head) / #memx_func_size
56
57memx_data_head:
58.skip 0x0800
59memx_data_tail:
60#endif
61
62/******************************************************************************
63 * MEMX code segment
64 *****************************************************************************/
65#ifdef INCLUDE_CODE
66// description
67//
68// $r15 - current (memx)
69// $r4 - packet length
70// +00: bitmask of heads to wait for vblank on
71// $r3 - opcode desciption
72// $r0 - zero
73memx_func_enter:
74 mov $r6 NV_PPWR_OUTPUT_SET_FB_PAUSE
75 nv_iowr(NV_PPWR_OUTPUT_SET, $r6)
76 memx_func_enter_wait:
77 nv_iord($r6, NV_PPWR_OUTPUT)
78 and $r6 NV_PPWR_OUTPUT_FB_PAUSE
79 bra z #memx_func_enter_wait
80 //XXX: TODO
81 ld b32 $r6 D[$r1 + 0x00]
82 add b32 $r1 0x04
83 ret
84
85// description
86//
87// $r15 - current (memx)
88// $r4 - packet length
89// $r3 - opcode desciption
90// $r0 - zero
91memx_func_leave:
92 mov $r6 NV_PPWR_OUTPUT_CLR_FB_PAUSE
93 nv_iowr(NV_PPWR_OUTPUT_CLR, $r6)
94 memx_func_leave_wait:
95 nv_iord($r6, NV_PPWR_OUTPUT)
96 and $r6 NV_PPWR_OUTPUT_FB_PAUSE
97 bra nz #memx_func_leave_wait
98 ret
99
100// description
101//
102// $r15 - current (memx)
103// $r4 - packet length
104// +00*n: addr
105// +04*n: data
106// $r3 - opcode desciption
107// $r0 - zero
108memx_func_wr32:
109 ld b32 $r6 D[$r1 + 0x00]
110 ld b32 $r5 D[$r1 + 0x04]
111 add b32 $r1 0x08
112 nv_wr32($r6, $r5)
113 sub b32 $r4 0x02
114 bra nz #memx_func_wr32
115 ret
116
117// description
118//
119// $r15 - current (memx)
120// $r4 - packet length
121// +00: addr
122// +04: mask
123// +08: data
124// +0c: timeout (ns)
125// $r3 - opcode desciption
126// $r0 - zero
127memx_func_wait:
128 nv_iord($r8, NV_PPWR_TIMER_LOW)
129 ld b32 $r14 D[$r1 + 0x00]
130 ld b32 $r13 D[$r1 + 0x04]
131 ld b32 $r12 D[$r1 + 0x08]
132 ld b32 $r11 D[$r1 + 0x0c]
133 add b32 $r1 0x10
134 call(wait)
135 ret
136
137// description
138//
139// $r15 - current (memx)
140// $r4 - packet length
141// +00: time (ns)
142// $r3 - opcode desciption
143// $r0 - zero
144memx_func_delay:
145 ld b32 $r14 D[$r1 + 0x00]
146 add b32 $r1 0x04
147 call(nsec)
148 ret
149
150// description
151//
152// $r15 - current (memx)
153// $r14 - sender process name
154// $r13 - message (exec)
155// $r12 - head of script
156// $r11 - tail of script
157// $r0 - zero
158memx_exec:
159 push $r14
160 push $r13
161 mov b32 $r1 $r12
162 mov b32 $r2 $r11
163 memx_exec_next:
164 // fetch the packet header, and locate opcode info
165 ld b32 $r3 D[$r1]
166 add b32 $r1 4
167 shr b32 $r4 $r3 16
168 mulu $r3 #memx_func_size
169
170 // execute the opcode handler
171 ld b32 $r5 D[$r3 + #memx_func_head + #memx_func]
172 call $r5
173
174 // keep going, if we haven't reached the end
175 cmp b32 $r1 $r2
176 bra l #memx_exec_next
177
178 // send completion reply
179 pop $r13
180 pop $r14
181 call(send)
182 ret
183
184// description
185//
186// $r15 - current (memx)
187// $r14 - sender process name
188// $r13 - message
189// $r12 - data0
190// $r11 - data1
191// $r0 - zero
192memx_info:
193 mov $r12 #memx_data_head
194 mov $r11 #memx_data_tail - #memx_data_head
195 call(send)
196 ret
197
198// description
199//
200// $r15 - current (memx)
201// $r14 - sender process name
202// $r13 - message
203// $r12 - data0
204// $r11 - data1
205// $r0 - zero
206memx_recv:
207 cmp b32 $r13 MEMX_MSG_EXEC
208 bra e #memx_exec
209 cmp b32 $r13 MEMX_MSG_INFO
210 bra e #memx_info
211 ret
212
213// description
214//
215// $r15 - current (memx)
216// $r0 - zero
217memx_init:
218 ret
219#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
new file mode 100644
index 000000000000..947be536daef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#define NVKM_PPWR_CHIPSET GK208
26
27#define NVKM_FALCON_PC24
28#define NVKM_FALCON_UNSHIFTED_IO
29//#define NVKM_FALCON_MMIO_UAS
30//#define NVKM_FALCON_MMIO_TRAP
31
32#include "macros.fuc"
33
34.section #nv108_pwr_data
35#define INCLUDE_PROC
36#include "kernel.fuc"
37#include "host.fuc"
38#include "memx.fuc"
39#include "perf.fuc"
40#include "test.fuc"
41#include "idle.fuc"
42#undef INCLUDE_PROC
43
44#define INCLUDE_DATA
45#include "kernel.fuc"
46#include "host.fuc"
47#include "memx.fuc"
48#include "perf.fuc"
49#include "test.fuc"
50#include "idle.fuc"
51#undef INCLUDE_DATA
52.align 256
53
54.section #nv108_pwr_code
55#define INCLUDE_CODE
56#include "kernel.fuc"
57#include "host.fuc"
58#include "memx.fuc"
59#include "perf.fuc"
60#include "test.fuc"
61#include "idle.fuc"
62#undef INCLUDE_CODE
63.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
new file mode 100644
index 000000000000..9342e2d7d3b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -0,0 +1,1165 @@
1uint32_t nv108_pwr_data[] = {
2/* 0x0000: proc_kern */
3 0x52544e49,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25/* 0x0058: proc_list_head */
26 0x54534f48,
27 0x00000379,
28 0x0000032a,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x584d454d,
49 0x0000046f,
50 0x00000461,
51 0x00000000,
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x00000000,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00000000,
67 0x00000000,
68 0x00000000,
69 0x00000000,
70 0x46524550,
71 0x00000473,
72 0x00000471,
73 0x00000000,
74 0x00000000,
75 0x00000000,
76 0x00000000,
77 0x00000000,
78 0x00000000,
79 0x00000000,
80 0x00000000,
81 0x00000000,
82 0x00000000,
83 0x00000000,
84 0x00000000,
85 0x00000000,
86 0x00000000,
87 0x00000000,
88 0x00000000,
89 0x00000000,
90 0x00000000,
91 0x00000000,
92 0x54534554,
93 0x00000494,
94 0x00000475,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x454c4449,
115 0x0000049f,
116 0x0000049d,
117 0x00000000,
118 0x00000000,
119 0x00000000,
120 0x00000000,
121 0x00000000,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136/* 0x0210: proc_list_tail */
137/* 0x0210: time_prev */
138 0x00000000,
139/* 0x0214: time_next */
140 0x00000000,
141/* 0x0218: fifo_queue */
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174/* 0x0298: rfifo_queue */
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195 0x00000000,
196 0x00000000,
197 0x00000000,
198 0x00000000,
199 0x00000000,
200 0x00000000,
201 0x00000000,
202 0x00000000,
203 0x00000000,
204 0x00000000,
205 0x00000000,
206 0x00000000,
207/* 0x0318: memx_func_head */
208 0x00010000,
209 0x00000000,
210 0x000003a9,
211/* 0x0324: memx_func_next */
212 0x00000001,
213 0x00000000,
214 0x000003c7,
215 0x00000002,
216 0x00000002,
217 0x000003df,
218 0x00040003,
219 0x00000000,
220 0x00000407,
221 0x00010004,
222 0x00000000,
223 0x00000421,
224/* 0x0354: memx_func_tail */
225/* 0x0354: memx_data_head */
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000,
237 0x00000000,
238 0x00000000,
239 0x00000000,
240 0x00000000,
241 0x00000000,
242 0x00000000,
243 0x00000000,
244 0x00000000,
245 0x00000000,
246 0x00000000,
247 0x00000000,
248 0x00000000,
249 0x00000000,
250 0x00000000,
251 0x00000000,
252 0x00000000,
253 0x00000000,
254 0x00000000,
255 0x00000000,
256 0x00000000,
257 0x00000000,
258 0x00000000,
259 0x00000000,
260 0x00000000,
261 0x00000000,
262 0x00000000,
263 0x00000000,
264 0x00000000,
265 0x00000000,
266 0x00000000,
267 0x00000000,
268 0x00000000,
269 0x00000000,
270 0x00000000,
271 0x00000000,
272 0x00000000,
273 0x00000000,
274 0x00000000,
275 0x00000000,
276 0x00000000,
277 0x00000000,
278 0x00000000,
279 0x00000000,
280 0x00000000,
281 0x00000000,
282 0x00000000,
283 0x00000000,
284 0x00000000,
285 0x00000000,
286 0x00000000,
287 0x00000000,
288 0x00000000,
289 0x00000000,
290 0x00000000,
291 0x00000000,
292 0x00000000,
293 0x00000000,
294 0x00000000,
295 0x00000000,
296 0x00000000,
297 0x00000000,
298 0x00000000,
299 0x00000000,
300 0x00000000,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0x00000000,
305 0x00000000,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0x00000000,
311 0x00000000,
312 0x00000000,
313 0x00000000,
314 0x00000000,
315 0x00000000,
316 0x00000000,
317 0x00000000,
318 0x00000000,
319 0x00000000,
320 0x00000000,
321 0x00000000,
322 0x00000000,
323 0x00000000,
324 0x00000000,
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000,
332 0x00000000,
333 0x00000000,
334 0x00000000,
335 0x00000000,
336 0x00000000,
337 0x00000000,
338 0x00000000,
339 0x00000000,
340 0x00000000,
341 0x00000000,
342 0x00000000,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 0x00000000,
347 0x00000000,
348 0x00000000,
349 0x00000000,
350 0x00000000,
351 0x00000000,
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000,
360 0x00000000,
361 0x00000000,
362 0x00000000,
363 0x00000000,
364 0x00000000,
365 0x00000000,
366 0x00000000,
367 0x00000000,
368 0x00000000,
369 0x00000000,
370 0x00000000,
371 0x00000000,
372 0x00000000,
373 0x00000000,
374 0x00000000,
375 0x00000000,
376 0x00000000,
377 0x00000000,
378 0x00000000,
379 0x00000000,
380 0x00000000,
381 0x00000000,
382 0x00000000,
383 0x00000000,
384 0x00000000,
385 0x00000000,
386 0x00000000,
387 0x00000000,
388 0x00000000,
389 0x00000000,
390 0x00000000,
391 0x00000000,
392 0x00000000,
393 0x00000000,
394 0x00000000,
395 0x00000000,
396 0x00000000,
397 0x00000000,
398 0x00000000,
399 0x00000000,
400 0x00000000,
401 0x00000000,
402 0x00000000,
403 0x00000000,
404 0x00000000,
405 0x00000000,
406 0x00000000,
407 0x00000000,
408 0x00000000,
409 0x00000000,
410 0x00000000,
411 0x00000000,
412 0x00000000,
413 0x00000000,
414 0x00000000,
415 0x00000000,
416 0x00000000,
417 0x00000000,
418 0x00000000,
419 0x00000000,
420 0x00000000,
421 0x00000000,
422 0x00000000,
423 0x00000000,
424 0x00000000,
425 0x00000000,
426 0x00000000,
427 0x00000000,
428 0x00000000,
429 0x00000000,
430 0x00000000,
431 0x00000000,
432 0x00000000,
433 0x00000000,
434 0x00000000,
435 0x00000000,
436 0x00000000,
437 0x00000000,
438 0x00000000,
439 0x00000000,
440 0x00000000,
441 0x00000000,
442 0x00000000,
443 0x00000000,
444 0x00000000,
445 0x00000000,
446 0x00000000,
447 0x00000000,
448 0x00000000,
449 0x00000000,
450 0x00000000,
451 0x00000000,
452 0x00000000,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x00000000,
466 0x00000000,
467 0x00000000,
468 0x00000000,
469 0x00000000,
470 0x00000000,
471 0x00000000,
472 0x00000000,
473 0x00000000,
474 0x00000000,
475 0x00000000,
476 0x00000000,
477 0x00000000,
478 0x00000000,
479 0x00000000,
480 0x00000000,
481 0x00000000,
482 0x00000000,
483 0x00000000,
484 0x00000000,
485 0x00000000,
486 0x00000000,
487 0x00000000,
488 0x00000000,
489 0x00000000,
490 0x00000000,
491 0x00000000,
492 0x00000000,
493 0x00000000,
494 0x00000000,
495 0x00000000,
496 0x00000000,
497 0x00000000,
498 0x00000000,
499 0x00000000,
500 0x00000000,
501 0x00000000,
502 0x00000000,
503 0x00000000,
504 0x00000000,
505 0x00000000,
506 0x00000000,
507 0x00000000,
508 0x00000000,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000,
514 0x00000000,
515 0x00000000,
516 0x00000000,
517 0x00000000,
518 0x00000000,
519 0x00000000,
520 0x00000000,
521 0x00000000,
522 0x00000000,
523 0x00000000,
524 0x00000000,
525 0x00000000,
526 0x00000000,
527 0x00000000,
528 0x00000000,
529 0x00000000,
530 0x00000000,
531 0x00000000,
532 0x00000000,
533 0x00000000,
534 0x00000000,
535 0x00000000,
536 0x00000000,
537 0x00000000,
538 0x00000000,
539 0x00000000,
540 0x00000000,
541 0x00000000,
542 0x00000000,
543 0x00000000,
544 0x00000000,
545 0x00000000,
546 0x00000000,
547 0x00000000,
548 0x00000000,
549 0x00000000,
550 0x00000000,
551 0x00000000,
552 0x00000000,
553 0x00000000,
554 0x00000000,
555 0x00000000,
556 0x00000000,
557 0x00000000,
558 0x00000000,
559 0x00000000,
560 0x00000000,
561 0x00000000,
562 0x00000000,
563 0x00000000,
564 0x00000000,
565 0x00000000,
566 0x00000000,
567 0x00000000,
568 0x00000000,
569 0x00000000,
570 0x00000000,
571 0x00000000,
572 0x00000000,
573 0x00000000,
574 0x00000000,
575 0x00000000,
576 0x00000000,
577 0x00000000,
578 0x00000000,
579 0x00000000,
580 0x00000000,
581 0x00000000,
582 0x00000000,
583 0x00000000,
584 0x00000000,
585 0x00000000,
586 0x00000000,
587 0x00000000,
588 0x00000000,
589 0x00000000,
590 0x00000000,
591 0x00000000,
592 0x00000000,
593 0x00000000,
594 0x00000000,
595 0x00000000,
596 0x00000000,
597 0x00000000,
598 0x00000000,
599 0x00000000,
600 0x00000000,
601 0x00000000,
602 0x00000000,
603 0x00000000,
604 0x00000000,
605 0x00000000,
606 0x00000000,
607 0x00000000,
608 0x00000000,
609 0x00000000,
610 0x00000000,
611 0x00000000,
612 0x00000000,
613 0x00000000,
614 0x00000000,
615 0x00000000,
616 0x00000000,
617 0x00000000,
618 0x00000000,
619 0x00000000,
620 0x00000000,
621 0x00000000,
622 0x00000000,
623 0x00000000,
624 0x00000000,
625 0x00000000,
626 0x00000000,
627 0x00000000,
628 0x00000000,
629 0x00000000,
630 0x00000000,
631 0x00000000,
632 0x00000000,
633 0x00000000,
634 0x00000000,
635 0x00000000,
636 0x00000000,
637 0x00000000,
638 0x00000000,
639 0x00000000,
640 0x00000000,
641 0x00000000,
642 0x00000000,
643 0x00000000,
644 0x00000000,
645 0x00000000,
646 0x00000000,
647 0x00000000,
648 0x00000000,
649 0x00000000,
650 0x00000000,
651 0x00000000,
652 0x00000000,
653 0x00000000,
654 0x00000000,
655 0x00000000,
656 0x00000000,
657 0x00000000,
658 0x00000000,
659 0x00000000,
660 0x00000000,
661 0x00000000,
662 0x00000000,
663 0x00000000,
664 0x00000000,
665 0x00000000,
666 0x00000000,
667 0x00000000,
668 0x00000000,
669 0x00000000,
670 0x00000000,
671 0x00000000,
672 0x00000000,
673 0x00000000,
674 0x00000000,
675 0x00000000,
676 0x00000000,
677 0x00000000,
678 0x00000000,
679 0x00000000,
680 0x00000000,
681 0x00000000,
682 0x00000000,
683 0x00000000,
684 0x00000000,
685 0x00000000,
686 0x00000000,
687 0x00000000,
688 0x00000000,
689 0x00000000,
690 0x00000000,
691 0x00000000,
692 0x00000000,
693 0x00000000,
694 0x00000000,
695 0x00000000,
696 0x00000000,
697 0x00000000,
698 0x00000000,
699 0x00000000,
700 0x00000000,
701 0x00000000,
702 0x00000000,
703 0x00000000,
704 0x00000000,
705 0x00000000,
706 0x00000000,
707 0x00000000,
708 0x00000000,
709 0x00000000,
710 0x00000000,
711 0x00000000,
712 0x00000000,
713 0x00000000,
714 0x00000000,
715 0x00000000,
716 0x00000000,
717 0x00000000,
718 0x00000000,
719 0x00000000,
720 0x00000000,
721 0x00000000,
722 0x00000000,
723 0x00000000,
724 0x00000000,
725 0x00000000,
726 0x00000000,
727 0x00000000,
728 0x00000000,
729 0x00000000,
730 0x00000000,
731 0x00000000,
732 0x00000000,
733 0x00000000,
734 0x00000000,
735 0x00000000,
736 0x00000000,
737 0x00000000,
738/* 0x0b54: memx_data_tail */
739 0x00000000,
740 0x00000000,
741 0x00000000,
742 0x00000000,
743 0x00000000,
744 0x00000000,
745 0x00000000,
746 0x00000000,
747 0x00000000,
748 0x00000000,
749 0x00000000,
750 0x00000000,
751 0x00000000,
752 0x00000000,
753 0x00000000,
754 0x00000000,
755 0x00000000,
756 0x00000000,
757 0x00000000,
758 0x00000000,
759 0x00000000,
760 0x00000000,
761 0x00000000,
762 0x00000000,
763 0x00000000,
764 0x00000000,
765 0x00000000,
766 0x00000000,
767 0x00000000,
768 0x00000000,
769 0x00000000,
770 0x00000000,
771 0x00000000,
772 0x00000000,
773 0x00000000,
774 0x00000000,
775 0x00000000,
776 0x00000000,
777 0x00000000,
778 0x00000000,
779 0x00000000,
780 0x00000000,
781 0x00000000,
782};
783
784uint32_t nv108_pwr_code[] = {
785 0x02910ef5,
786/* 0x0004: rd32 */
787 0xf607a040,
788 0x04bd000e,
789 0xe3f0010e,
790 0x07ac4001,
791 0xbd000ef6,
792/* 0x0019: rd32_wait */
793 0x07ac4e04,
794 0xf100eecf,
795 0xf47000e4,
796 0xa44df61b,
797 0x00ddcf07,
798/* 0x002e: wr32 */
799 0xa04000f8,
800 0x000ef607,
801 0xa44004bd,
802 0x000df607,
803 0x020e04bd,
804 0xf0f0e5f0,
805 0xac4001e3,
806 0x000ef607,
807/* 0x004e: wr32_wait */
808 0xac4e04bd,
809 0x00eecf07,
810 0x7000e4f1,
811 0xf8f61bf4,
812/* 0x005d: nsec */
813 0xcf2c0800,
814/* 0x0062: nsec_loop */
815 0x2c090088,
816 0xbb0099cf,
817 0x9ea60298,
818 0xf8f61ef4,
819/* 0x0071: wait */
820 0xcf2c0800,
821/* 0x0076: wait_loop */
822 0xeeb20088,
823 0x0000047e,
824 0xadfddab2,
825 0xf4aca604,
826 0x2c09100b,
827 0xbb0099cf,
828 0x9ba60298,
829/* 0x0093: wait_done */
830 0xf8e61ef4,
831/* 0x0095: intr_watchdog */
832 0x03e99800,
833 0xf40096b0,
834 0x0a98280b,
835 0x029abb84,
836 0x0d0e1cf4,
837 0x01de7e01,
838 0xf494bd00,
839/* 0x00b2: intr_watchdog_next_time */
840 0x0a98140e,
841 0x00a6b085,
842 0xa6080bf4,
843 0x061cf49a,
844/* 0x00c0: intr_watchdog_next_time_set */
845/* 0x00c3: intr_watchdog_next_proc */
846 0xb58509b5,
847 0xe0b603e9,
848 0x10e6b158,
849 0xc81bf402,
850/* 0x00d2: intr */
851 0x00f900f8,
852 0x80f904bd,
853 0xa0f990f9,
854 0xc0f9b0f9,
855 0xe0f9d0f9,
856 0x000ff0f9,
857 0xf90188fe,
858 0x04504880,
859 0xb60088cf,
860 0x50400180,
861 0x0008f604,
862 0x080804bd,
863 0xc40088cf,
864 0x0bf40289,
865 0x8500b51f,
866 0x957e580e,
867 0x09980000,
868 0x0096b085,
869 0x000d0bf4,
870 0x0009f634,
871 0x09b504bd,
872/* 0x0125: intr_skip_watchdog */
873 0x0089e484,
874 0x360bf408,
875 0xcf068849,
876 0x9ac40099,
877 0x220bf402,
878 0xcf04c04c,
879 0xc0f900cc,
880 0xf14f484e,
881 0x0d5453e3,
882 0x023f7e00,
883 0x40c0fc00,
884 0x0cf604c0,
885/* 0x0157: intr_subintr_skip_fifo */
886 0x4004bd00,
887 0x09f60688,
888/* 0x015f: intr_skip_subintr */
889 0xc404bd00,
890 0x0bf42089,
891 0xbfa4f107,
892/* 0x0169: intr_skip_pause */
893 0x4089c4ff,
894 0xf1070bf4,
895/* 0x0173: intr_skip_user0 */
896 0x00ffbfa4,
897 0x0008f604,
898 0x80fc04bd,
899 0xfc0088fe,
900 0xfce0fcf0,
901 0xfcc0fcd0,
902 0xfca0fcb0,
903 0xfc80fc90,
904 0x0032f400,
905/* 0x0196: timer */
906 0x32f401f8,
907 0x03f89810,
908 0xf40086b0,
909 0xfeb53a1c,
910 0xf6380003,
911 0x04bd0008,
912 0x88cf0808,
913 0x0284f000,
914 0x081c1bf4,
915 0x0088cf34,
916 0x0bf4e0a6,
917 0xf4e8a608,
918/* 0x01c6: timer_reset */
919 0x3400161e,
920 0xbd000ef6,
921 0x840eb504,
922/* 0x01d0: timer_enable */
923 0x38000108,
924 0xbd0008f6,
925/* 0x01d9: timer_done */
926 0x1031f404,
927/* 0x01de: send_proc */
928 0x80f900f8,
929 0xe89890f9,
930 0x04e99805,
931 0xa60486f0,
932 0x2a0bf489,
933 0x940398c4,
934 0x80b60488,
935 0x008ebb18,
936 0xb500fa98,
937 0x8db5008a,
938 0x028cb501,
939 0xb6038bb5,
940 0x94f00190,
941 0x04e9b507,
942/* 0x0217: send_done */
943 0xfc0231f4,
944 0xf880fc90,
945/* 0x021d: find */
946 0x0880f900,
947 0x0131f458,
948/* 0x0224: find_loop */
949 0xa6008a98,
950 0x100bf4ae,
951 0xb15880b6,
952 0xf4021086,
953 0x32f4f11b,
954/* 0x0239: find_done */
955 0xfc8eb201,
956/* 0x023f: send */
957 0x7e00f880,
958 0xf400021d,
959 0x00f89b01,
960/* 0x0248: recv */
961 0x9805e898,
962 0x32f404e9,
963 0xf489a601,
964 0x89c43c0b,
965 0x0180b603,
966 0xb50784f0,
967 0xea9805e8,
968 0xfef0f902,
969 0xf0f9018f,
970 0x9994efb2,
971 0x00e9bb04,
972 0x9818e0b6,
973 0xec9803eb,
974 0x01ed9802,
975 0xf900ee98,
976 0xfef0fca5,
977 0x31f400f8,
978/* 0x028f: recv_done */
979 0xf8f0fc01,
980/* 0x0291: init */
981 0x01084100,
982 0xe70011cf,
983 0xb6010911,
984 0x14fe0814,
985 0x00e04100,
986 0x000013f0,
987 0x0001f61c,
988 0xff0104bd,
989 0x01f61400,
990 0x0104bd00,
991 0x0015f102,
992 0xf6100008,
993 0x04bd0001,
994 0xf000d241,
995 0x10fe0013,
996 0x1031f400,
997 0x38000101,
998 0xbd0001f6,
999/* 0x02db: init_proc */
1000 0x98580f04,
1001 0x16b001f1,
1002 0xfa0bf400,
1003 0xf0b615f9,
1004 0xf20ef458,
1005/* 0x02ec: host_send */
1006 0xcf04b041,
1007 0xa0420011,
1008 0x0022cf04,
1009 0x0bf412a6,
1010 0x071ec42e,
1011 0xb704ee94,
1012 0x980218e0,
1013 0xec9803eb,
1014 0x01ed9802,
1015 0x7e00ee98,
1016 0xb600023f,
1017 0x1ec40110,
1018 0x04b0400f,
1019 0xbd0001f6,
1020 0xc70ef404,
1021/* 0x0328: host_send_done */
1022/* 0x032a: host_recv */
1023 0x494100f8,
1024 0x5413f14e,
1025 0xf4e1a652,
1026/* 0x0336: host_recv_wait */
1027 0xcc41b90b,
1028 0x0011cf04,
1029 0xcf04c842,
1030 0x16f00022,
1031 0xf412a608,
1032 0x23c4ef0b,
1033 0x0434b607,
1034 0x029830b7,
1035 0xb5033bb5,
1036 0x3db5023c,
1037 0x003eb501,
1038 0xf00120b6,
1039 0xc8400f24,
1040 0x0002f604,
1041 0x400204bd,
1042 0x02f60000,
1043 0xf804bd00,
1044/* 0x0379: host_init */
1045 0x00804100,
1046 0xf11014b6,
1047 0x40021815,
1048 0x01f604d0,
1049 0x4104bd00,
1050 0x14b60080,
1051 0x9815f110,
1052 0x04dc4002,
1053 0xbd0001f6,
1054 0x40010104,
1055 0x01f604c4,
1056 0xf804bd00,
1057/* 0x03a9: memx_func_enter */
1058 0x40040600,
1059 0x06f607e0,
1060/* 0x03b3: memx_func_enter_wait */
1061 0x4604bd00,
1062 0x66cf07c0,
1063 0x0464f000,
1064 0x98f70bf4,
1065 0x10b60016,
1066/* 0x03c7: memx_func_leave */
1067 0x0600f804,
1068 0x07e44004,
1069 0xbd0006f6,
1070/* 0x03d1: memx_func_leave_wait */
1071 0x07c04604,
1072 0xf00066cf,
1073 0x1bf40464,
1074/* 0x03df: memx_func_wr32 */
1075 0x9800f8f7,
1076 0x15980016,
1077 0x0810b601,
1078 0x50f960f9,
1079 0xe0fcd0fc,
1080 0x00002e7e,
1081 0x140003f1,
1082 0xa00506fd,
1083 0xb604bd05,
1084 0x1bf40242,
1085/* 0x0407: memx_func_wait */
1086 0x0800f8dd,
1087 0x0088cf2c,
1088 0x98001e98,
1089 0x1c98011d,
1090 0x031b9802,
1091 0x7e1010b6,
1092 0xf8000071,
1093/* 0x0421: memx_func_delay */
1094 0x001e9800,
1095 0x7e0410b6,
1096 0xf800005d,
1097/* 0x042d: memx_exec */
1098 0xf9e0f900,
1099 0xb2c1b2d0,
1100/* 0x0435: memx_exec_next */
1101 0x001398b2,
1102 0x950410b6,
1103 0x30f01034,
1104 0xc835980c,
1105 0x12a655f9,
1106 0xfced1ef4,
1107 0x7ee0fcd0,
1108 0xf800023f,
1109/* 0x0455: memx_info */
1110 0x03544c00,
1111 0x7e08004b,
1112 0xf800023f,
1113/* 0x0461: memx_recv */
1114 0x01d6b000,
1115 0xb0c90bf4,
1116 0x0bf400d6,
1117/* 0x046f: memx_init */
1118 0xf800f8eb,
1119/* 0x0471: perf_recv */
1120/* 0x0473: perf_init */
1121 0xf800f800,
1122/* 0x0475: test_recv */
1123 0x04584100,
1124 0xb60011cf,
1125 0x58400110,
1126 0x0001f604,
1127 0xe7f104bd,
1128 0xe3f1d900,
1129 0x967e134f,
1130 0x00f80001,
1131/* 0x0494: test_init */
1132 0x7e08004e,
1133 0xf8000196,
1134/* 0x049d: idle_recv */
1135/* 0x049f: idle */
1136 0xf400f800,
1137 0x54410031,
1138 0x0011cf04,
1139 0x400110b6,
1140 0x01f60454,
1141/* 0x04b3: idle_loop */
1142 0x0104bd00,
1143 0x0232f458,
1144/* 0x04b8: idle_proc */
1145/* 0x04b8: idle_proc_exec */
1146 0x1eb210f9,
1147 0x0002487e,
1148 0x11f410fc,
1149 0x0231f409,
1150/* 0x04cb: idle_proc_next */
1151 0xb6f00ef4,
1152 0x1fa65810,
1153 0xf4e81bf4,
1154 0x28f4e002,
1155 0xc60ef400,
1156 0x00000000,
1157 0x00000000,
1158 0x00000000,
1159 0x00000000,
1160 0x00000000,
1161 0x00000000,
1162 0x00000000,
1163 0x00000000,
1164 0x00000000,
1165};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
new file mode 100644
index 000000000000..6fde0b89e5aa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#define NVKM_PPWR_CHIPSET GT215
26
27//#define NVKM_FALCON_PC24
28//#define NVKM_FALCON_UNSHIFTED_IO
29//#define NVKM_FALCON_MMIO_UAS
30//#define NVKM_FALCON_MMIO_TRAP
31
32#include "macros.fuc"
33
34.section #nva3_pwr_data
35#define INCLUDE_PROC
36#include "kernel.fuc"
37#include "host.fuc"
38#include "memx.fuc"
39#include "perf.fuc"
40#include "test.fuc"
41#include "idle.fuc"
42#undef INCLUDE_PROC
43
44#define INCLUDE_DATA
45#include "kernel.fuc"
46#include "host.fuc"
47#include "memx.fuc"
48#include "perf.fuc"
49#include "test.fuc"
50#include "idle.fuc"
51#undef INCLUDE_DATA
52.align 256
53
54.section #nva3_pwr_code
55#define INCLUDE_CODE
56#include "kernel.fuc"
57#include "host.fuc"
58#include "memx.fuc"
59#include "perf.fuc"
60#include "test.fuc"
61#include "idle.fuc"
62#undef INCLUDE_CODE
63.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
new file mode 100644
index 000000000000..0fa4d7dcd407
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -0,0 +1,1229 @@
1uint32_t nva3_pwr_data[] = {
2/* 0x0000: proc_kern */
3 0x52544e49,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25/* 0x0058: proc_list_head */
26 0x54534f48,
27 0x00000430,
28 0x000003cd,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x584d454d,
49 0x0000054e,
50 0x00000540,
51 0x00000000,
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x00000000,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00000000,
67 0x00000000,
68 0x00000000,
69 0x00000000,
70 0x46524550,
71 0x00000552,
72 0x00000550,
73 0x00000000,
74 0x00000000,
75 0x00000000,
76 0x00000000,
77 0x00000000,
78 0x00000000,
79 0x00000000,
80 0x00000000,
81 0x00000000,
82 0x00000000,
83 0x00000000,
84 0x00000000,
85 0x00000000,
86 0x00000000,
87 0x00000000,
88 0x00000000,
89 0x00000000,
90 0x00000000,
91 0x00000000,
92 0x54534554,
93 0x0000057b,
94 0x00000554,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x454c4449,
115 0x00000587,
116 0x00000585,
117 0x00000000,
118 0x00000000,
119 0x00000000,
120 0x00000000,
121 0x00000000,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136/* 0x0210: proc_list_tail */
137/* 0x0210: time_prev */
138 0x00000000,
139/* 0x0214: time_next */
140 0x00000000,
141/* 0x0218: fifo_queue */
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174/* 0x0298: rfifo_queue */
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195 0x00000000,
196 0x00000000,
197 0x00000000,
198 0x00000000,
199 0x00000000,
200 0x00000000,
201 0x00000000,
202 0x00000000,
203 0x00000000,
204 0x00000000,
205 0x00000000,
206 0x00000000,
207/* 0x0318: memx_func_head */
208 0x00010000,
209 0x00000000,
210 0x0000046f,
211/* 0x0324: memx_func_next */
212 0x00000001,
213 0x00000000,
214 0x00000496,
215 0x00000002,
216 0x00000002,
217 0x000004b7,
218 0x00040003,
219 0x00000000,
220 0x000004df,
221 0x00010004,
222 0x00000000,
223 0x000004fc,
224/* 0x0354: memx_func_tail */
225/* 0x0354: memx_data_head */
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000,
237 0x00000000,
238 0x00000000,
239 0x00000000,
240 0x00000000,
241 0x00000000,
242 0x00000000,
243 0x00000000,
244 0x00000000,
245 0x00000000,
246 0x00000000,
247 0x00000000,
248 0x00000000,
249 0x00000000,
250 0x00000000,
251 0x00000000,
252 0x00000000,
253 0x00000000,
254 0x00000000,
255 0x00000000,
256 0x00000000,
257 0x00000000,
258 0x00000000,
259 0x00000000,
260 0x00000000,
261 0x00000000,
262 0x00000000,
263 0x00000000,
264 0x00000000,
265 0x00000000,
266 0x00000000,
267 0x00000000,
268 0x00000000,
269 0x00000000,
270 0x00000000,
271 0x00000000,
272 0x00000000,
273 0x00000000,
274 0x00000000,
275 0x00000000,
276 0x00000000,
277 0x00000000,
278 0x00000000,
279 0x00000000,
280 0x00000000,
281 0x00000000,
282 0x00000000,
283 0x00000000,
284 0x00000000,
285 0x00000000,
286 0x00000000,
287 0x00000000,
288 0x00000000,
289 0x00000000,
290 0x00000000,
291 0x00000000,
292 0x00000000,
293 0x00000000,
294 0x00000000,
295 0x00000000,
296 0x00000000,
297 0x00000000,
298 0x00000000,
299 0x00000000,
300 0x00000000,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0x00000000,
305 0x00000000,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0x00000000,
311 0x00000000,
312 0x00000000,
313 0x00000000,
314 0x00000000,
315 0x00000000,
316 0x00000000,
317 0x00000000,
318 0x00000000,
319 0x00000000,
320 0x00000000,
321 0x00000000,
322 0x00000000,
323 0x00000000,
324 0x00000000,
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000,
332 0x00000000,
333 0x00000000,
334 0x00000000,
335 0x00000000,
336 0x00000000,
337 0x00000000,
338 0x00000000,
339 0x00000000,
340 0x00000000,
341 0x00000000,
342 0x00000000,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 0x00000000,
347 0x00000000,
348 0x00000000,
349 0x00000000,
350 0x00000000,
351 0x00000000,
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000,
360 0x00000000,
361 0x00000000,
362 0x00000000,
363 0x00000000,
364 0x00000000,
365 0x00000000,
366 0x00000000,
367 0x00000000,
368 0x00000000,
369 0x00000000,
370 0x00000000,
371 0x00000000,
372 0x00000000,
373 0x00000000,
374 0x00000000,
375 0x00000000,
376 0x00000000,
377 0x00000000,
378 0x00000000,
379 0x00000000,
380 0x00000000,
381 0x00000000,
382 0x00000000,
383 0x00000000,
384 0x00000000,
385 0x00000000,
386 0x00000000,
387 0x00000000,
388 0x00000000,
389 0x00000000,
390 0x00000000,
391 0x00000000,
392 0x00000000,
393 0x00000000,
394 0x00000000,
395 0x00000000,
396 0x00000000,
397 0x00000000,
398 0x00000000,
399 0x00000000,
400 0x00000000,
401 0x00000000,
402 0x00000000,
403 0x00000000,
404 0x00000000,
405 0x00000000,
406 0x00000000,
407 0x00000000,
408 0x00000000,
409 0x00000000,
410 0x00000000,
411 0x00000000,
412 0x00000000,
413 0x00000000,
414 0x00000000,
415 0x00000000,
416 0x00000000,
417 0x00000000,
418 0x00000000,
419 0x00000000,
420 0x00000000,
421 0x00000000,
422 0x00000000,
423 0x00000000,
424 0x00000000,
425 0x00000000,
426 0x00000000,
427 0x00000000,
428 0x00000000,
429 0x00000000,
430 0x00000000,
431 0x00000000,
432 0x00000000,
433 0x00000000,
434 0x00000000,
435 0x00000000,
436 0x00000000,
437 0x00000000,
438 0x00000000,
439 0x00000000,
440 0x00000000,
441 0x00000000,
442 0x00000000,
443 0x00000000,
444 0x00000000,
445 0x00000000,
446 0x00000000,
447 0x00000000,
448 0x00000000,
449 0x00000000,
450 0x00000000,
451 0x00000000,
452 0x00000000,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x00000000,
466 0x00000000,
467 0x00000000,
468 0x00000000,
469 0x00000000,
470 0x00000000,
471 0x00000000,
472 0x00000000,
473 0x00000000,
474 0x00000000,
475 0x00000000,
476 0x00000000,
477 0x00000000,
478 0x00000000,
479 0x00000000,
480 0x00000000,
481 0x00000000,
482 0x00000000,
483 0x00000000,
484 0x00000000,
485 0x00000000,
486 0x00000000,
487 0x00000000,
488 0x00000000,
489 0x00000000,
490 0x00000000,
491 0x00000000,
492 0x00000000,
493 0x00000000,
494 0x00000000,
495 0x00000000,
496 0x00000000,
497 0x00000000,
498 0x00000000,
499 0x00000000,
500 0x00000000,
501 0x00000000,
502 0x00000000,
503 0x00000000,
504 0x00000000,
505 0x00000000,
506 0x00000000,
507 0x00000000,
508 0x00000000,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000,
514 0x00000000,
515 0x00000000,
516 0x00000000,
517 0x00000000,
518 0x00000000,
519 0x00000000,
520 0x00000000,
521 0x00000000,
522 0x00000000,
523 0x00000000,
524 0x00000000,
525 0x00000000,
526 0x00000000,
527 0x00000000,
528 0x00000000,
529 0x00000000,
530 0x00000000,
531 0x00000000,
532 0x00000000,
533 0x00000000,
534 0x00000000,
535 0x00000000,
536 0x00000000,
537 0x00000000,
538 0x00000000,
539 0x00000000,
540 0x00000000,
541 0x00000000,
542 0x00000000,
543 0x00000000,
544 0x00000000,
545 0x00000000,
546 0x00000000,
547 0x00000000,
548 0x00000000,
549 0x00000000,
550 0x00000000,
551 0x00000000,
552 0x00000000,
553 0x00000000,
554 0x00000000,
555 0x00000000,
556 0x00000000,
557 0x00000000,
558 0x00000000,
559 0x00000000,
560 0x00000000,
561 0x00000000,
562 0x00000000,
563 0x00000000,
564 0x00000000,
565 0x00000000,
566 0x00000000,
567 0x00000000,
568 0x00000000,
569 0x00000000,
570 0x00000000,
571 0x00000000,
572 0x00000000,
573 0x00000000,
574 0x00000000,
575 0x00000000,
576 0x00000000,
577 0x00000000,
578 0x00000000,
579 0x00000000,
580 0x00000000,
581 0x00000000,
582 0x00000000,
583 0x00000000,
584 0x00000000,
585 0x00000000,
586 0x00000000,
587 0x00000000,
588 0x00000000,
589 0x00000000,
590 0x00000000,
591 0x00000000,
592 0x00000000,
593 0x00000000,
594 0x00000000,
595 0x00000000,
596 0x00000000,
597 0x00000000,
598 0x00000000,
599 0x00000000,
600 0x00000000,
601 0x00000000,
602 0x00000000,
603 0x00000000,
604 0x00000000,
605 0x00000000,
606 0x00000000,
607 0x00000000,
608 0x00000000,
609 0x00000000,
610 0x00000000,
611 0x00000000,
612 0x00000000,
613 0x00000000,
614 0x00000000,
615 0x00000000,
616 0x00000000,
617 0x00000000,
618 0x00000000,
619 0x00000000,
620 0x00000000,
621 0x00000000,
622 0x00000000,
623 0x00000000,
624 0x00000000,
625 0x00000000,
626 0x00000000,
627 0x00000000,
628 0x00000000,
629 0x00000000,
630 0x00000000,
631 0x00000000,
632 0x00000000,
633 0x00000000,
634 0x00000000,
635 0x00000000,
636 0x00000000,
637 0x00000000,
638 0x00000000,
639 0x00000000,
640 0x00000000,
641 0x00000000,
642 0x00000000,
643 0x00000000,
644 0x00000000,
645 0x00000000,
646 0x00000000,
647 0x00000000,
648 0x00000000,
649 0x00000000,
650 0x00000000,
651 0x00000000,
652 0x00000000,
653 0x00000000,
654 0x00000000,
655 0x00000000,
656 0x00000000,
657 0x00000000,
658 0x00000000,
659 0x00000000,
660 0x00000000,
661 0x00000000,
662 0x00000000,
663 0x00000000,
664 0x00000000,
665 0x00000000,
666 0x00000000,
667 0x00000000,
668 0x00000000,
669 0x00000000,
670 0x00000000,
671 0x00000000,
672 0x00000000,
673 0x00000000,
674 0x00000000,
675 0x00000000,
676 0x00000000,
677 0x00000000,
678 0x00000000,
679 0x00000000,
680 0x00000000,
681 0x00000000,
682 0x00000000,
683 0x00000000,
684 0x00000000,
685 0x00000000,
686 0x00000000,
687 0x00000000,
688 0x00000000,
689 0x00000000,
690 0x00000000,
691 0x00000000,
692 0x00000000,
693 0x00000000,
694 0x00000000,
695 0x00000000,
696 0x00000000,
697 0x00000000,
698 0x00000000,
699 0x00000000,
700 0x00000000,
701 0x00000000,
702 0x00000000,
703 0x00000000,
704 0x00000000,
705 0x00000000,
706 0x00000000,
707 0x00000000,
708 0x00000000,
709 0x00000000,
710 0x00000000,
711 0x00000000,
712 0x00000000,
713 0x00000000,
714 0x00000000,
715 0x00000000,
716 0x00000000,
717 0x00000000,
718 0x00000000,
719 0x00000000,
720 0x00000000,
721 0x00000000,
722 0x00000000,
723 0x00000000,
724 0x00000000,
725 0x00000000,
726 0x00000000,
727 0x00000000,
728 0x00000000,
729 0x00000000,
730 0x00000000,
731 0x00000000,
732 0x00000000,
733 0x00000000,
734 0x00000000,
735 0x00000000,
736 0x00000000,
737 0x00000000,
738/* 0x0b54: memx_data_tail */
739 0x00000000,
740 0x00000000,
741 0x00000000,
742 0x00000000,
743 0x00000000,
744 0x00000000,
745 0x00000000,
746 0x00000000,
747 0x00000000,
748 0x00000000,
749 0x00000000,
750 0x00000000,
751 0x00000000,
752 0x00000000,
753 0x00000000,
754 0x00000000,
755 0x00000000,
756 0x00000000,
757 0x00000000,
758 0x00000000,
759 0x00000000,
760 0x00000000,
761 0x00000000,
762 0x00000000,
763 0x00000000,
764 0x00000000,
765 0x00000000,
766 0x00000000,
767 0x00000000,
768 0x00000000,
769 0x00000000,
770 0x00000000,
771 0x00000000,
772 0x00000000,
773 0x00000000,
774 0x00000000,
775 0x00000000,
776 0x00000000,
777 0x00000000,
778 0x00000000,
779 0x00000000,
780 0x00000000,
781 0x00000000,
782};
783
784uint32_t nva3_pwr_code[] = {
785 0x030d0ef5,
786/* 0x0004: rd32 */
787 0x07a007f1,
788 0xd00604b6,
789 0x04bd000e,
790 0xf001e7f0,
791 0x07f101e3,
792 0x04b607ac,
793 0x000ed006,
794/* 0x0022: rd32_wait */
795 0xe7f104bd,
796 0xe4b607ac,
797 0x00eecf06,
798 0x7000e4f1,
799 0xf1f21bf4,
800 0xb607a4d7,
801 0xddcf06d4,
802/* 0x003f: wr32 */
803 0xf100f800,
804 0xb607a007,
805 0x0ed00604,
806 0xf104bd00,
807 0xb607a407,
808 0x0dd00604,
809 0xf004bd00,
810 0xe5f002e7,
811 0x01e3f0f0,
812 0x07ac07f1,
813 0xd00604b6,
814 0x04bd000e,
815/* 0x006c: wr32_wait */
816 0x07ace7f1,
817 0xcf06e4b6,
818 0xe4f100ee,
819 0x1bf47000,
820/* 0x007f: nsec */
821 0xf000f8f2,
822 0x84b62c87,
823 0x0088cf06,
824/* 0x0088: nsec_loop */
825 0xb62c97f0,
826 0x99cf0694,
827 0x0298bb00,
828 0xf4069eb8,
829 0x00f8f11e,
830/* 0x009c: wait */
831 0xb62c87f0,
832 0x88cf0684,
833/* 0x00a5: wait_loop */
834 0x02eeb900,
835 0xb90421f4,
836 0xadfd02da,
837 0x06acb804,
838 0xf0150bf4,
839 0x94b62c97,
840 0x0099cf06,
841 0xb80298bb,
842 0x1ef4069b,
843/* 0x00c9: wait_done */
844/* 0x00cb: intr_watchdog */
845 0x9800f8df,
846 0x96b003e9,
847 0x2a0bf400,
848 0xbb840a98,
849 0x1cf4029a,
850 0x01d7f00f,
851 0x025421f5,
852 0x0ef494bd,
853/* 0x00e9: intr_watchdog_next_time */
854 0x850a9815,
855 0xf400a6b0,
856 0x9ab8090b,
857 0x061cf406,
858/* 0x00f8: intr_watchdog_next_time_set */
859/* 0x00fb: intr_watchdog_next_proc */
860 0x80850980,
861 0xe0b603e9,
862 0x10e6b158,
863 0xc61bf402,
864/* 0x010a: intr */
865 0x00f900f8,
866 0x80f904bd,
867 0xa0f990f9,
868 0xc0f9b0f9,
869 0xe0f9d0f9,
870 0xf7f0f0f9,
871 0x0188fe00,
872 0x87f180f9,
873 0x84b605d0,
874 0x0088cf06,
875 0xf10180b6,
876 0xb605d007,
877 0x08d00604,
878 0xf004bd00,
879 0x84b60887,
880 0x0088cf06,
881 0xf40289c4,
882 0x0080230b,
883 0x58e7f085,
884 0x98cb21f4,
885 0x96b08509,
886 0x110bf400,
887 0xb63407f0,
888 0x09d00604,
889 0x8004bd00,
890/* 0x016e: intr_skip_watchdog */
891 0x89e48409,
892 0x0bf40800,
893 0x8897f148,
894 0x0694b606,
895 0xc40099cf,
896 0x0bf4029a,
897 0xc0c7f12c,
898 0x06c4b604,
899 0xf900cccf,
900 0x48e7f1c0,
901 0x53e3f14f,
902 0x00d7f054,
903 0x02b921f5,
904 0x07f1c0fc,
905 0x04b604c0,
906 0x000cd006,
907/* 0x01ae: intr_subintr_skip_fifo */
908 0x07f104bd,
909 0x04b60688,
910 0x0009d006,
911/* 0x01ba: intr_skip_subintr */
912 0x89c404bd,
913 0x070bf420,
914 0xffbfa4f1,
915/* 0x01c4: intr_skip_pause */
916 0xf44089c4,
917 0xa4f1070b,
918/* 0x01ce: intr_skip_user0 */
919 0x07f0ffbf,
920 0x0604b604,
921 0xbd0008d0,
922 0xfe80fc04,
923 0xf0fc0088,
924 0xd0fce0fc,
925 0xb0fcc0fc,
926 0x90fca0fc,
927 0x00fc80fc,
928 0xf80032f4,
929/* 0x01f5: timer */
930 0x1032f401,
931 0xb003f898,
932 0x1cf40086,
933 0x03fe8051,
934 0xb63807f0,
935 0x08d00604,
936 0xf004bd00,
937 0x84b60887,
938 0x0088cf06,
939 0xf40284f0,
940 0x87f0261b,
941 0x0684b634,
942 0xb80088cf,
943 0x0bf406e0,
944 0x06e8b809,
945/* 0x0233: timer_reset */
946 0xf01f1ef4,
947 0x04b63407,
948 0x000ed006,
949 0x0e8004bd,
950/* 0x0241: timer_enable */
951 0x0187f084,
952 0xb63807f0,
953 0x08d00604,
954/* 0x024f: timer_done */
955 0xf404bd00,
956 0x00f81031,
957/* 0x0254: send_proc */
958 0x90f980f9,
959 0x9805e898,
960 0x86f004e9,
961 0x0689b804,
962 0xc42a0bf4,
963 0x88940398,
964 0x1880b604,
965 0x98008ebb,
966 0x8a8000fa,
967 0x018d8000,
968 0x80028c80,
969 0x90b6038b,
970 0x0794f001,
971 0xf404e980,
972/* 0x028e: send_done */
973 0x90fc0231,
974 0x00f880fc,
975/* 0x0294: find */
976 0x87f080f9,
977 0x0131f458,
978/* 0x029c: find_loop */
979 0xb8008a98,
980 0x0bf406ae,
981 0x5880b610,
982 0x021086b1,
983 0xf4f01bf4,
984/* 0x02b2: find_done */
985 0x8eb90132,
986 0xf880fc02,
987/* 0x02b9: send */
988 0x9421f500,
989 0x9701f402,
990/* 0x02c2: recv */
991 0xe89800f8,
992 0x04e99805,
993 0xb80132f4,
994 0x0bf40689,
995 0x0389c43d,
996 0xf00180b6,
997 0xe8800784,
998 0x02ea9805,
999 0x8ffef0f9,
1000 0xb9f0f901,
1001 0x999402ef,
1002 0x00e9bb04,
1003 0x9818e0b6,
1004 0xec9803eb,
1005 0x01ed9802,
1006 0xf900ee98,
1007 0xfef0fca5,
1008 0x31f400f8,
1009/* 0x030b: recv_done */
1010 0xf8f0fc01,
1011/* 0x030d: init */
1012 0x0817f100,
1013 0x0614b601,
1014 0xe70011cf,
1015 0xb6010911,
1016 0x14fe0814,
1017 0xe017f100,
1018 0x0013f000,
1019 0xb61c07f0,
1020 0x01d00604,
1021 0xf004bd00,
1022 0x07f0ff17,
1023 0x0604b614,
1024 0xbd0001d0,
1025 0x0217f004,
1026 0x080015f1,
1027 0xb61007f0,
1028 0x01d00604,
1029 0xf104bd00,
1030 0xf0010a17,
1031 0x10fe0013,
1032 0x1031f400,
1033 0xf00117f0,
1034 0x04b63807,
1035 0x0001d006,
1036 0xf7f004bd,
1037/* 0x0371: init_proc */
1038 0x01f19858,
1039 0xf40016b0,
1040 0x15f9fa0b,
1041 0xf458f0b6,
1042/* 0x0382: host_send */
1043 0x17f1f20e,
1044 0x14b604b0,
1045 0x0011cf06,
1046 0x04a027f1,
1047 0xcf0624b6,
1048 0x12b80022,
1049 0x320bf406,
1050 0x94071ec4,
1051 0xe0b704ee,
1052 0xeb980218,
1053 0x02ec9803,
1054 0x9801ed98,
1055 0x21f500ee,
1056 0x10b602b9,
1057 0x0f1ec401,
1058 0x04b007f1,
1059 0xd00604b6,
1060 0x04bd0001,
1061/* 0x03cb: host_send_done */
1062 0xf8ba0ef4,
1063/* 0x03cd: host_recv */
1064 0x4917f100,
1065 0x5413f14e,
1066 0x06e1b852,
1067/* 0x03db: host_recv_wait */
1068 0xf1aa0bf4,
1069 0xb604cc17,
1070 0x11cf0614,
1071 0xc827f100,
1072 0x0624b604,
1073 0xf00022cf,
1074 0x12b80816,
1075 0xe60bf406,
1076 0xb60723c4,
1077 0x30b70434,
1078 0x3b800298,
1079 0x023c8003,
1080 0x80013d80,
1081 0x20b6003e,
1082 0x0f24f001,
1083 0x04c807f1,
1084 0xd00604b6,
1085 0x04bd0002,
1086 0xf04027f0,
1087 0x04b60007,
1088 0x0002d006,
1089 0x00f804bd,
1090/* 0x0430: host_init */
1091 0x008017f1,
1092 0xf11014b6,
1093 0xf1021815,
1094 0xb604d007,
1095 0x01d00604,
1096 0xf104bd00,
1097 0xb6008017,
1098 0x15f11014,
1099 0x07f10298,
1100 0x04b604dc,
1101 0x0001d006,
1102 0x17f004bd,
1103 0xc407f101,
1104 0x0604b604,
1105 0xbd0001d0,
1106/* 0x046f: memx_func_enter */
1107 0xf000f804,
1108 0x07f10467,
1109 0x04b607e0,
1110 0x0006d006,
1111/* 0x047e: memx_func_enter_wait */
1112 0x67f104bd,
1113 0x64b607c0,
1114 0x0066cf06,
1115 0xf40464f0,
1116 0x1698f30b,
1117 0x0410b600,
1118/* 0x0496: memx_func_leave */
1119 0x67f000f8,
1120 0xe407f104,
1121 0x0604b607,
1122 0xbd0006d0,
1123/* 0x04a5: memx_func_leave_wait */
1124 0xc067f104,
1125 0x0664b607,
1126 0xf00066cf,
1127 0x1bf40464,
1128/* 0x04b7: memx_func_wr32 */
1129 0x9800f8f3,
1130 0x15980016,
1131 0x0810b601,
1132 0x50f960f9,
1133 0xe0fcd0fc,
1134 0xf13f21f4,
1135 0xfd140003,
1136 0x05800506,
1137 0xb604bd00,
1138 0x1bf40242,
1139/* 0x04df: memx_func_wait */
1140 0xf000f8dd,
1141 0x84b62c87,
1142 0x0088cf06,
1143 0x98001e98,
1144 0x1c98011d,
1145 0x031b9802,
1146 0xf41010b6,
1147 0x00f89c21,
1148/* 0x04fc: memx_func_delay */
1149 0xb6001e98,
1150 0x21f40410,
1151/* 0x0507: memx_exec */
1152 0xf900f87f,
1153 0xb9d0f9e0,
1154 0xb2b902c1,
1155/* 0x0511: memx_exec_next */
1156 0x00139802,
1157 0x950410b6,
1158 0x30f01034,
1159 0xc835980c,
1160 0x12b855f9,
1161 0xec1ef406,
1162 0xe0fcd0fc,
1163 0x02b921f5,
1164/* 0x0532: memx_info */
1165 0xc7f100f8,
1166 0xb7f10354,
1167 0x21f50800,
1168 0x00f802b9,
1169/* 0x0540: memx_recv */
1170 0xf401d6b0,
1171 0xd6b0c40b,
1172 0xe90bf400,
1173/* 0x054e: memx_init */
1174 0x00f800f8,
1175/* 0x0550: perf_recv */
1176/* 0x0552: perf_init */
1177 0x00f800f8,
1178/* 0x0554: test_recv */
1179 0x05d817f1,
1180 0xcf0614b6,
1181 0x10b60011,
1182 0xd807f101,
1183 0x0604b605,
1184 0xbd0001d0,
1185 0x00e7f104,
1186 0x4fe3f1d9,
1187 0xf521f513,
1188/* 0x057b: test_init */
1189 0xf100f801,
1190 0xf50800e7,
1191 0xf801f521,
1192/* 0x0585: idle_recv */
1193/* 0x0587: idle */
1194 0xf400f800,
1195 0x17f10031,
1196 0x14b605d4,
1197 0x0011cf06,
1198 0xf10110b6,
1199 0xb605d407,
1200 0x01d00604,
1201/* 0x05a3: idle_loop */
1202 0xf004bd00,
1203 0x32f45817,
1204/* 0x05a9: idle_proc */
1205/* 0x05a9: idle_proc_exec */
1206 0xb910f902,
1207 0x21f5021e,
1208 0x10fc02c2,
1209 0xf40911f4,
1210 0x0ef40231,
1211/* 0x05bd: idle_proc_next */
1212 0x5810b6ef,
1213 0xf4061fb8,
1214 0x02f4e61b,
1215 0x0028f4dd,
1216 0x00bb0ef4,
1217 0x00000000,
1218 0x00000000,
1219 0x00000000,
1220 0x00000000,
1221 0x00000000,
1222 0x00000000,
1223 0x00000000,
1224 0x00000000,
1225 0x00000000,
1226 0x00000000,
1227 0x00000000,
1228 0x00000000,
1229};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
new file mode 100644
index 000000000000..eaa64da68e36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#define NVKM_PPWR_CHIPSET GF100
26
27//#define NVKM_FALCON_PC24
28//#define NVKM_FALCON_UNSHIFTED_IO
29//#define NVKM_FALCON_MMIO_UAS
30//#define NVKM_FALCON_MMIO_TRAP
31
32#include "macros.fuc"
33
34.section #nvc0_pwr_data
35#define INCLUDE_PROC
36#include "kernel.fuc"
37#include "host.fuc"
38#include "memx.fuc"
39#include "perf.fuc"
40#include "test.fuc"
41#include "idle.fuc"
42#undef INCLUDE_PROC
43
44#define INCLUDE_DATA
45#include "kernel.fuc"
46#include "host.fuc"
47#include "memx.fuc"
48#include "perf.fuc"
49#include "test.fuc"
50#include "idle.fuc"
51#undef INCLUDE_DATA
52.align 256
53
54.section #nvc0_pwr_code
55#define INCLUDE_CODE
56#include "kernel.fuc"
57#include "host.fuc"
58#include "memx.fuc"
59#include "perf.fuc"
60#include "test.fuc"
61#include "idle.fuc"
62#undef INCLUDE_CODE
63.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
new file mode 100644
index 000000000000..82c8e8b88917
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -0,0 +1,1229 @@
1uint32_t nvc0_pwr_data[] = {
2/* 0x0000: proc_kern */
3 0x52544e49,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25/* 0x0058: proc_list_head */
26 0x54534f48,
27 0x00000430,
28 0x000003cd,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x584d454d,
49 0x0000054e,
50 0x00000540,
51 0x00000000,
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x00000000,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00000000,
67 0x00000000,
68 0x00000000,
69 0x00000000,
70 0x46524550,
71 0x00000552,
72 0x00000550,
73 0x00000000,
74 0x00000000,
75 0x00000000,
76 0x00000000,
77 0x00000000,
78 0x00000000,
79 0x00000000,
80 0x00000000,
81 0x00000000,
82 0x00000000,
83 0x00000000,
84 0x00000000,
85 0x00000000,
86 0x00000000,
87 0x00000000,
88 0x00000000,
89 0x00000000,
90 0x00000000,
91 0x00000000,
92 0x54534554,
93 0x0000057b,
94 0x00000554,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x454c4449,
115 0x00000587,
116 0x00000585,
117 0x00000000,
118 0x00000000,
119 0x00000000,
120 0x00000000,
121 0x00000000,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136/* 0x0210: proc_list_tail */
137/* 0x0210: time_prev */
138 0x00000000,
139/* 0x0214: time_next */
140 0x00000000,
141/* 0x0218: fifo_queue */
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174/* 0x0298: rfifo_queue */
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195 0x00000000,
196 0x00000000,
197 0x00000000,
198 0x00000000,
199 0x00000000,
200 0x00000000,
201 0x00000000,
202 0x00000000,
203 0x00000000,
204 0x00000000,
205 0x00000000,
206 0x00000000,
207/* 0x0318: memx_func_head */
208 0x00010000,
209 0x00000000,
210 0x0000046f,
211/* 0x0324: memx_func_next */
212 0x00000001,
213 0x00000000,
214 0x00000496,
215 0x00000002,
216 0x00000002,
217 0x000004b7,
218 0x00040003,
219 0x00000000,
220 0x000004df,
221 0x00010004,
222 0x00000000,
223 0x000004fc,
224/* 0x0354: memx_func_tail */
225/* 0x0354: memx_data_head */
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000,
237 0x00000000,
238 0x00000000,
239 0x00000000,
240 0x00000000,
241 0x00000000,
242 0x00000000,
243 0x00000000,
244 0x00000000,
245 0x00000000,
246 0x00000000,
247 0x00000000,
248 0x00000000,
249 0x00000000,
250 0x00000000,
251 0x00000000,
252 0x00000000,
253 0x00000000,
254 0x00000000,
255 0x00000000,
256 0x00000000,
257 0x00000000,
258 0x00000000,
259 0x00000000,
260 0x00000000,
261 0x00000000,
262 0x00000000,
263 0x00000000,
264 0x00000000,
265 0x00000000,
266 0x00000000,
267 0x00000000,
268 0x00000000,
269 0x00000000,
270 0x00000000,
271 0x00000000,
272 0x00000000,
273 0x00000000,
274 0x00000000,
275 0x00000000,
276 0x00000000,
277 0x00000000,
278 0x00000000,
279 0x00000000,
280 0x00000000,
281 0x00000000,
282 0x00000000,
283 0x00000000,
284 0x00000000,
285 0x00000000,
286 0x00000000,
287 0x00000000,
288 0x00000000,
289 0x00000000,
290 0x00000000,
291 0x00000000,
292 0x00000000,
293 0x00000000,
294 0x00000000,
295 0x00000000,
296 0x00000000,
297 0x00000000,
298 0x00000000,
299 0x00000000,
300 0x00000000,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0x00000000,
305 0x00000000,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0x00000000,
311 0x00000000,
312 0x00000000,
313 0x00000000,
314 0x00000000,
315 0x00000000,
316 0x00000000,
317 0x00000000,
318 0x00000000,
319 0x00000000,
320 0x00000000,
321 0x00000000,
322 0x00000000,
323 0x00000000,
324 0x00000000,
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000,
332 0x00000000,
333 0x00000000,
334 0x00000000,
335 0x00000000,
336 0x00000000,
337 0x00000000,
338 0x00000000,
339 0x00000000,
340 0x00000000,
341 0x00000000,
342 0x00000000,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 0x00000000,
347 0x00000000,
348 0x00000000,
349 0x00000000,
350 0x00000000,
351 0x00000000,
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000,
360 0x00000000,
361 0x00000000,
362 0x00000000,
363 0x00000000,
364 0x00000000,
365 0x00000000,
366 0x00000000,
367 0x00000000,
368 0x00000000,
369 0x00000000,
370 0x00000000,
371 0x00000000,
372 0x00000000,
373 0x00000000,
374 0x00000000,
375 0x00000000,
376 0x00000000,
377 0x00000000,
378 0x00000000,
379 0x00000000,
380 0x00000000,
381 0x00000000,
382 0x00000000,
383 0x00000000,
384 0x00000000,
385 0x00000000,
386 0x00000000,
387 0x00000000,
388 0x00000000,
389 0x00000000,
390 0x00000000,
391 0x00000000,
392 0x00000000,
393 0x00000000,
394 0x00000000,
395 0x00000000,
396 0x00000000,
397 0x00000000,
398 0x00000000,
399 0x00000000,
400 0x00000000,
401 0x00000000,
402 0x00000000,
403 0x00000000,
404 0x00000000,
405 0x00000000,
406 0x00000000,
407 0x00000000,
408 0x00000000,
409 0x00000000,
410 0x00000000,
411 0x00000000,
412 0x00000000,
413 0x00000000,
414 0x00000000,
415 0x00000000,
416 0x00000000,
417 0x00000000,
418 0x00000000,
419 0x00000000,
420 0x00000000,
421 0x00000000,
422 0x00000000,
423 0x00000000,
424 0x00000000,
425 0x00000000,
426 0x00000000,
427 0x00000000,
428 0x00000000,
429 0x00000000,
430 0x00000000,
431 0x00000000,
432 0x00000000,
433 0x00000000,
434 0x00000000,
435 0x00000000,
436 0x00000000,
437 0x00000000,
438 0x00000000,
439 0x00000000,
440 0x00000000,
441 0x00000000,
442 0x00000000,
443 0x00000000,
444 0x00000000,
445 0x00000000,
446 0x00000000,
447 0x00000000,
448 0x00000000,
449 0x00000000,
450 0x00000000,
451 0x00000000,
452 0x00000000,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x00000000,
466 0x00000000,
467 0x00000000,
468 0x00000000,
469 0x00000000,
470 0x00000000,
471 0x00000000,
472 0x00000000,
473 0x00000000,
474 0x00000000,
475 0x00000000,
476 0x00000000,
477 0x00000000,
478 0x00000000,
479 0x00000000,
480 0x00000000,
481 0x00000000,
482 0x00000000,
483 0x00000000,
484 0x00000000,
485 0x00000000,
486 0x00000000,
487 0x00000000,
488 0x00000000,
489 0x00000000,
490 0x00000000,
491 0x00000000,
492 0x00000000,
493 0x00000000,
494 0x00000000,
495 0x00000000,
496 0x00000000,
497 0x00000000,
498 0x00000000,
499 0x00000000,
500 0x00000000,
501 0x00000000,
502 0x00000000,
503 0x00000000,
504 0x00000000,
505 0x00000000,
506 0x00000000,
507 0x00000000,
508 0x00000000,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000,
514 0x00000000,
515 0x00000000,
516 0x00000000,
517 0x00000000,
518 0x00000000,
519 0x00000000,
520 0x00000000,
521 0x00000000,
522 0x00000000,
523 0x00000000,
524 0x00000000,
525 0x00000000,
526 0x00000000,
527 0x00000000,
528 0x00000000,
529 0x00000000,
530 0x00000000,
531 0x00000000,
532 0x00000000,
533 0x00000000,
534 0x00000000,
535 0x00000000,
536 0x00000000,
537 0x00000000,
538 0x00000000,
539 0x00000000,
540 0x00000000,
541 0x00000000,
542 0x00000000,
543 0x00000000,
544 0x00000000,
545 0x00000000,
546 0x00000000,
547 0x00000000,
548 0x00000000,
549 0x00000000,
550 0x00000000,
551 0x00000000,
552 0x00000000,
553 0x00000000,
554 0x00000000,
555 0x00000000,
556 0x00000000,
557 0x00000000,
558 0x00000000,
559 0x00000000,
560 0x00000000,
561 0x00000000,
562 0x00000000,
563 0x00000000,
564 0x00000000,
565 0x00000000,
566 0x00000000,
567 0x00000000,
568 0x00000000,
569 0x00000000,
570 0x00000000,
571 0x00000000,
572 0x00000000,
573 0x00000000,
574 0x00000000,
575 0x00000000,
576 0x00000000,
577 0x00000000,
578 0x00000000,
579 0x00000000,
580 0x00000000,
581 0x00000000,
582 0x00000000,
583 0x00000000,
584 0x00000000,
585 0x00000000,
586 0x00000000,
587 0x00000000,
588 0x00000000,
589 0x00000000,
590 0x00000000,
591 0x00000000,
592 0x00000000,
593 0x00000000,
594 0x00000000,
595 0x00000000,
596 0x00000000,
597 0x00000000,
598 0x00000000,
599 0x00000000,
600 0x00000000,
601 0x00000000,
602 0x00000000,
603 0x00000000,
604 0x00000000,
605 0x00000000,
606 0x00000000,
607 0x00000000,
608 0x00000000,
609 0x00000000,
610 0x00000000,
611 0x00000000,
612 0x00000000,
613 0x00000000,
614 0x00000000,
615 0x00000000,
616 0x00000000,
617 0x00000000,
618 0x00000000,
619 0x00000000,
620 0x00000000,
621 0x00000000,
622 0x00000000,
623 0x00000000,
624 0x00000000,
625 0x00000000,
626 0x00000000,
627 0x00000000,
628 0x00000000,
629 0x00000000,
630 0x00000000,
631 0x00000000,
632 0x00000000,
633 0x00000000,
634 0x00000000,
635 0x00000000,
636 0x00000000,
637 0x00000000,
638 0x00000000,
639 0x00000000,
640 0x00000000,
641 0x00000000,
642 0x00000000,
643 0x00000000,
644 0x00000000,
645 0x00000000,
646 0x00000000,
647 0x00000000,
648 0x00000000,
649 0x00000000,
650 0x00000000,
651 0x00000000,
652 0x00000000,
653 0x00000000,
654 0x00000000,
655 0x00000000,
656 0x00000000,
657 0x00000000,
658 0x00000000,
659 0x00000000,
660 0x00000000,
661 0x00000000,
662 0x00000000,
663 0x00000000,
664 0x00000000,
665 0x00000000,
666 0x00000000,
667 0x00000000,
668 0x00000000,
669 0x00000000,
670 0x00000000,
671 0x00000000,
672 0x00000000,
673 0x00000000,
674 0x00000000,
675 0x00000000,
676 0x00000000,
677 0x00000000,
678 0x00000000,
679 0x00000000,
680 0x00000000,
681 0x00000000,
682 0x00000000,
683 0x00000000,
684 0x00000000,
685 0x00000000,
686 0x00000000,
687 0x00000000,
688 0x00000000,
689 0x00000000,
690 0x00000000,
691 0x00000000,
692 0x00000000,
693 0x00000000,
694 0x00000000,
695 0x00000000,
696 0x00000000,
697 0x00000000,
698 0x00000000,
699 0x00000000,
700 0x00000000,
701 0x00000000,
702 0x00000000,
703 0x00000000,
704 0x00000000,
705 0x00000000,
706 0x00000000,
707 0x00000000,
708 0x00000000,
709 0x00000000,
710 0x00000000,
711 0x00000000,
712 0x00000000,
713 0x00000000,
714 0x00000000,
715 0x00000000,
716 0x00000000,
717 0x00000000,
718 0x00000000,
719 0x00000000,
720 0x00000000,
721 0x00000000,
722 0x00000000,
723 0x00000000,
724 0x00000000,
725 0x00000000,
726 0x00000000,
727 0x00000000,
728 0x00000000,
729 0x00000000,
730 0x00000000,
731 0x00000000,
732 0x00000000,
733 0x00000000,
734 0x00000000,
735 0x00000000,
736 0x00000000,
737 0x00000000,
738/* 0x0b54: memx_data_tail */
739 0x00000000,
740 0x00000000,
741 0x00000000,
742 0x00000000,
743 0x00000000,
744 0x00000000,
745 0x00000000,
746 0x00000000,
747 0x00000000,
748 0x00000000,
749 0x00000000,
750 0x00000000,
751 0x00000000,
752 0x00000000,
753 0x00000000,
754 0x00000000,
755 0x00000000,
756 0x00000000,
757 0x00000000,
758 0x00000000,
759 0x00000000,
760 0x00000000,
761 0x00000000,
762 0x00000000,
763 0x00000000,
764 0x00000000,
765 0x00000000,
766 0x00000000,
767 0x00000000,
768 0x00000000,
769 0x00000000,
770 0x00000000,
771 0x00000000,
772 0x00000000,
773 0x00000000,
774 0x00000000,
775 0x00000000,
776 0x00000000,
777 0x00000000,
778 0x00000000,
779 0x00000000,
780 0x00000000,
781 0x00000000,
782};
783
784uint32_t nvc0_pwr_code[] = {
785 0x030d0ef5,
786/* 0x0004: rd32 */
787 0x07a007f1,
788 0xd00604b6,
789 0x04bd000e,
790 0xf001e7f0,
791 0x07f101e3,
792 0x04b607ac,
793 0x000ed006,
794/* 0x0022: rd32_wait */
795 0xe7f104bd,
796 0xe4b607ac,
797 0x00eecf06,
798 0x7000e4f1,
799 0xf1f21bf4,
800 0xb607a4d7,
801 0xddcf06d4,
802/* 0x003f: wr32 */
803 0xf100f800,
804 0xb607a007,
805 0x0ed00604,
806 0xf104bd00,
807 0xb607a407,
808 0x0dd00604,
809 0xf004bd00,
810 0xe5f002e7,
811 0x01e3f0f0,
812 0x07ac07f1,
813 0xd00604b6,
814 0x04bd000e,
815/* 0x006c: wr32_wait */
816 0x07ace7f1,
817 0xcf06e4b6,
818 0xe4f100ee,
819 0x1bf47000,
820/* 0x007f: nsec */
821 0xf000f8f2,
822 0x84b62c87,
823 0x0088cf06,
824/* 0x0088: nsec_loop */
825 0xb62c97f0,
826 0x99cf0694,
827 0x0298bb00,
828 0xf4069eb8,
829 0x00f8f11e,
830/* 0x009c: wait */
831 0xb62c87f0,
832 0x88cf0684,
833/* 0x00a5: wait_loop */
834 0x02eeb900,
835 0xb90421f4,
836 0xadfd02da,
837 0x06acb804,
838 0xf0150bf4,
839 0x94b62c97,
840 0x0099cf06,
841 0xb80298bb,
842 0x1ef4069b,
843/* 0x00c9: wait_done */
844/* 0x00cb: intr_watchdog */
845 0x9800f8df,
846 0x96b003e9,
847 0x2a0bf400,
848 0xbb840a98,
849 0x1cf4029a,
850 0x01d7f00f,
851 0x025421f5,
852 0x0ef494bd,
853/* 0x00e9: intr_watchdog_next_time */
854 0x850a9815,
855 0xf400a6b0,
856 0x9ab8090b,
857 0x061cf406,
858/* 0x00f8: intr_watchdog_next_time_set */
859/* 0x00fb: intr_watchdog_next_proc */
860 0x80850980,
861 0xe0b603e9,
862 0x10e6b158,
863 0xc61bf402,
864/* 0x010a: intr */
865 0x00f900f8,
866 0x80f904bd,
867 0xa0f990f9,
868 0xc0f9b0f9,
869 0xe0f9d0f9,
870 0xf7f0f0f9,
871 0x0188fe00,
872 0x87f180f9,
873 0x84b605d0,
874 0x0088cf06,
875 0xf10180b6,
876 0xb605d007,
877 0x08d00604,
878 0xf004bd00,
879 0x84b60887,
880 0x0088cf06,
881 0xf40289c4,
882 0x0080230b,
883 0x58e7f085,
884 0x98cb21f4,
885 0x96b08509,
886 0x110bf400,
887 0xb63407f0,
888 0x09d00604,
889 0x8004bd00,
890/* 0x016e: intr_skip_watchdog */
891 0x89e48409,
892 0x0bf40800,
893 0x8897f148,
894 0x0694b606,
895 0xc40099cf,
896 0x0bf4029a,
897 0xc0c7f12c,
898 0x06c4b604,
899 0xf900cccf,
900 0x48e7f1c0,
901 0x53e3f14f,
902 0x00d7f054,
903 0x02b921f5,
904 0x07f1c0fc,
905 0x04b604c0,
906 0x000cd006,
907/* 0x01ae: intr_subintr_skip_fifo */
908 0x07f104bd,
909 0x04b60688,
910 0x0009d006,
911/* 0x01ba: intr_skip_subintr */
912 0x89c404bd,
913 0x070bf420,
914 0xffbfa4f1,
915/* 0x01c4: intr_skip_pause */
916 0xf44089c4,
917 0xa4f1070b,
918/* 0x01ce: intr_skip_user0 */
919 0x07f0ffbf,
920 0x0604b604,
921 0xbd0008d0,
922 0xfe80fc04,
923 0xf0fc0088,
924 0xd0fce0fc,
925 0xb0fcc0fc,
926 0x90fca0fc,
927 0x00fc80fc,
928 0xf80032f4,
929/* 0x01f5: timer */
930 0x1032f401,
931 0xb003f898,
932 0x1cf40086,
933 0x03fe8051,
934 0xb63807f0,
935 0x08d00604,
936 0xf004bd00,
937 0x84b60887,
938 0x0088cf06,
939 0xf40284f0,
940 0x87f0261b,
941 0x0684b634,
942 0xb80088cf,
943 0x0bf406e0,
944 0x06e8b809,
945/* 0x0233: timer_reset */
946 0xf01f1ef4,
947 0x04b63407,
948 0x000ed006,
949 0x0e8004bd,
950/* 0x0241: timer_enable */
951 0x0187f084,
952 0xb63807f0,
953 0x08d00604,
954/* 0x024f: timer_done */
955 0xf404bd00,
956 0x00f81031,
957/* 0x0254: send_proc */
958 0x90f980f9,
959 0x9805e898,
960 0x86f004e9,
961 0x0689b804,
962 0xc42a0bf4,
963 0x88940398,
964 0x1880b604,
965 0x98008ebb,
966 0x8a8000fa,
967 0x018d8000,
968 0x80028c80,
969 0x90b6038b,
970 0x0794f001,
971 0xf404e980,
972/* 0x028e: send_done */
973 0x90fc0231,
974 0x00f880fc,
975/* 0x0294: find */
976 0x87f080f9,
977 0x0131f458,
978/* 0x029c: find_loop */
979 0xb8008a98,
980 0x0bf406ae,
981 0x5880b610,
982 0x021086b1,
983 0xf4f01bf4,
984/* 0x02b2: find_done */
985 0x8eb90132,
986 0xf880fc02,
987/* 0x02b9: send */
988 0x9421f500,
989 0x9701f402,
990/* 0x02c2: recv */
991 0xe89800f8,
992 0x04e99805,
993 0xb80132f4,
994 0x0bf40689,
995 0x0389c43d,
996 0xf00180b6,
997 0xe8800784,
998 0x02ea9805,
999 0x8ffef0f9,
1000 0xb9f0f901,
1001 0x999402ef,
1002 0x00e9bb04,
1003 0x9818e0b6,
1004 0xec9803eb,
1005 0x01ed9802,
1006 0xf900ee98,
1007 0xfef0fca5,
1008 0x31f400f8,
1009/* 0x030b: recv_done */
1010 0xf8f0fc01,
1011/* 0x030d: init */
1012 0x0817f100,
1013 0x0614b601,
1014 0xe70011cf,
1015 0xb6010911,
1016 0x14fe0814,
1017 0xe017f100,
1018 0x0013f000,
1019 0xb61c07f0,
1020 0x01d00604,
1021 0xf004bd00,
1022 0x07f0ff17,
1023 0x0604b614,
1024 0xbd0001d0,
1025 0x0217f004,
1026 0x080015f1,
1027 0xb61007f0,
1028 0x01d00604,
1029 0xf104bd00,
1030 0xf0010a17,
1031 0x10fe0013,
1032 0x1031f400,
1033 0xf00117f0,
1034 0x04b63807,
1035 0x0001d006,
1036 0xf7f004bd,
1037/* 0x0371: init_proc */
1038 0x01f19858,
1039 0xf40016b0,
1040 0x15f9fa0b,
1041 0xf458f0b6,
1042/* 0x0382: host_send */
1043 0x17f1f20e,
1044 0x14b604b0,
1045 0x0011cf06,
1046 0x04a027f1,
1047 0xcf0624b6,
1048 0x12b80022,
1049 0x320bf406,
1050 0x94071ec4,
1051 0xe0b704ee,
1052 0xeb980218,
1053 0x02ec9803,
1054 0x9801ed98,
1055 0x21f500ee,
1056 0x10b602b9,
1057 0x0f1ec401,
1058 0x04b007f1,
1059 0xd00604b6,
1060 0x04bd0001,
1061/* 0x03cb: host_send_done */
1062 0xf8ba0ef4,
1063/* 0x03cd: host_recv */
1064 0x4917f100,
1065 0x5413f14e,
1066 0x06e1b852,
1067/* 0x03db: host_recv_wait */
1068 0xf1aa0bf4,
1069 0xb604cc17,
1070 0x11cf0614,
1071 0xc827f100,
1072 0x0624b604,
1073 0xf00022cf,
1074 0x12b80816,
1075 0xe60bf406,
1076 0xb60723c4,
1077 0x30b70434,
1078 0x3b800298,
1079 0x023c8003,
1080 0x80013d80,
1081 0x20b6003e,
1082 0x0f24f001,
1083 0x04c807f1,
1084 0xd00604b6,
1085 0x04bd0002,
1086 0xf04027f0,
1087 0x04b60007,
1088 0x0002d006,
1089 0x00f804bd,
1090/* 0x0430: host_init */
1091 0x008017f1,
1092 0xf11014b6,
1093 0xf1021815,
1094 0xb604d007,
1095 0x01d00604,
1096 0xf104bd00,
1097 0xb6008017,
1098 0x15f11014,
1099 0x07f10298,
1100 0x04b604dc,
1101 0x0001d006,
1102 0x17f004bd,
1103 0xc407f101,
1104 0x0604b604,
1105 0xbd0001d0,
1106/* 0x046f: memx_func_enter */
1107 0xf000f804,
1108 0x07f10467,
1109 0x04b607e0,
1110 0x0006d006,
1111/* 0x047e: memx_func_enter_wait */
1112 0x67f104bd,
1113 0x64b607c0,
1114 0x0066cf06,
1115 0xf40464f0,
1116 0x1698f30b,
1117 0x0410b600,
1118/* 0x0496: memx_func_leave */
1119 0x67f000f8,
1120 0xe407f104,
1121 0x0604b607,
1122 0xbd0006d0,
1123/* 0x04a5: memx_func_leave_wait */
1124 0xc067f104,
1125 0x0664b607,
1126 0xf00066cf,
1127 0x1bf40464,
1128/* 0x04b7: memx_func_wr32 */
1129 0x9800f8f3,
1130 0x15980016,
1131 0x0810b601,
1132 0x50f960f9,
1133 0xe0fcd0fc,
1134 0xf13f21f4,
1135 0xfd140003,
1136 0x05800506,
1137 0xb604bd00,
1138 0x1bf40242,
1139/* 0x04df: memx_func_wait */
1140 0xf000f8dd,
1141 0x84b62c87,
1142 0x0088cf06,
1143 0x98001e98,
1144 0x1c98011d,
1145 0x031b9802,
1146 0xf41010b6,
1147 0x00f89c21,
1148/* 0x04fc: memx_func_delay */
1149 0xb6001e98,
1150 0x21f40410,
1151/* 0x0507: memx_exec */
1152 0xf900f87f,
1153 0xb9d0f9e0,
1154 0xb2b902c1,
1155/* 0x0511: memx_exec_next */
1156 0x00139802,
1157 0x950410b6,
1158 0x30f01034,
1159 0xc835980c,
1160 0x12b855f9,
1161 0xec1ef406,
1162 0xe0fcd0fc,
1163 0x02b921f5,
1164/* 0x0532: memx_info */
1165 0xc7f100f8,
1166 0xb7f10354,
1167 0x21f50800,
1168 0x00f802b9,
1169/* 0x0540: memx_recv */
1170 0xf401d6b0,
1171 0xd6b0c40b,
1172 0xe90bf400,
1173/* 0x054e: memx_init */
1174 0x00f800f8,
1175/* 0x0550: perf_recv */
1176/* 0x0552: perf_init */
1177 0x00f800f8,
1178/* 0x0554: test_recv */
1179 0x05d817f1,
1180 0xcf0614b6,
1181 0x10b60011,
1182 0xd807f101,
1183 0x0604b605,
1184 0xbd0001d0,
1185 0x00e7f104,
1186 0x4fe3f1d9,
1187 0xf521f513,
1188/* 0x057b: test_init */
1189 0xf100f801,
1190 0xf50800e7,
1191 0xf801f521,
1192/* 0x0585: idle_recv */
1193/* 0x0587: idle */
1194 0xf400f800,
1195 0x17f10031,
1196 0x14b605d4,
1197 0x0011cf06,
1198 0xf10110b6,
1199 0xb605d407,
1200 0x01d00604,
1201/* 0x05a3: idle_loop */
1202 0xf004bd00,
1203 0x32f45817,
1204/* 0x05a9: idle_proc */
1205/* 0x05a9: idle_proc_exec */
1206 0xb910f902,
1207 0x21f5021e,
1208 0x10fc02c2,
1209 0xf40911f4,
1210 0x0ef40231,
1211/* 0x05bd: idle_proc_next */
1212 0x5810b6ef,
1213 0xf4061fb8,
1214 0x02f4e61b,
1215 0x0028f4dd,
1216 0x00bb0ef4,
1217 0x00000000,
1218 0x00000000,
1219 0x00000000,
1220 0x00000000,
1221 0x00000000,
1222 0x00000000,
1223 0x00000000,
1224 0x00000000,
1225 0x00000000,
1226 0x00000000,
1227 0x00000000,
1228 0x00000000,
1229};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
new file mode 100644
index 000000000000..32d65ea254dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#define NVKM_PPWR_CHIPSET GF119
26
27//#define NVKM_FALCON_PC24
28#define NVKM_FALCON_UNSHIFTED_IO
29//#define NVKM_FALCON_MMIO_UAS
30//#define NVKM_FALCON_MMIO_TRAP
31
32#include "macros.fuc"
33
34.section #nvd0_pwr_data
35#define INCLUDE_PROC
36#include "kernel.fuc"
37#include "host.fuc"
38#include "memx.fuc"
39#include "perf.fuc"
40#include "test.fuc"
41#include "idle.fuc"
42#undef INCLUDE_PROC
43
44#define INCLUDE_DATA
45#include "kernel.fuc"
46#include "host.fuc"
47#include "memx.fuc"
48#include "perf.fuc"
49#include "test.fuc"
50#include "idle.fuc"
51#undef INCLUDE_DATA
52.align 256
53
54.section #nvd0_pwr_code
55#define INCLUDE_CODE
56#include "kernel.fuc"
57#include "host.fuc"
58#include "memx.fuc"
59#include "perf.fuc"
60#include "test.fuc"
61#include "idle.fuc"
62#undef INCLUDE_CODE
63.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
new file mode 100644
index 000000000000..ce65e2a4b789
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -0,0 +1,1229 @@
1uint32_t nvd0_pwr_data[] = {
2/* 0x0000: proc_kern */
3 0x52544e49,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25/* 0x0058: proc_list_head */
26 0x54534f48,
27 0x000003be,
28 0x00000367,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x584d454d,
49 0x000004c4,
50 0x000004b6,
51 0x00000000,
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x00000000,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00000000,
67 0x00000000,
68 0x00000000,
69 0x00000000,
70 0x46524550,
71 0x000004c8,
72 0x000004c6,
73 0x00000000,
74 0x00000000,
75 0x00000000,
76 0x00000000,
77 0x00000000,
78 0x00000000,
79 0x00000000,
80 0x00000000,
81 0x00000000,
82 0x00000000,
83 0x00000000,
84 0x00000000,
85 0x00000000,
86 0x00000000,
87 0x00000000,
88 0x00000000,
89 0x00000000,
90 0x00000000,
91 0x00000000,
92 0x54534554,
93 0x000004eb,
94 0x000004ca,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x454c4449,
115 0x000004f7,
116 0x000004f5,
117 0x00000000,
118 0x00000000,
119 0x00000000,
120 0x00000000,
121 0x00000000,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136/* 0x0210: proc_list_tail */
137/* 0x0210: time_prev */
138 0x00000000,
139/* 0x0214: time_next */
140 0x00000000,
141/* 0x0218: fifo_queue */
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174/* 0x0298: rfifo_queue */
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195 0x00000000,
196 0x00000000,
197 0x00000000,
198 0x00000000,
199 0x00000000,
200 0x00000000,
201 0x00000000,
202 0x00000000,
203 0x00000000,
204 0x00000000,
205 0x00000000,
206 0x00000000,
207/* 0x0318: memx_func_head */
208 0x00010000,
209 0x00000000,
210 0x000003f4,
211/* 0x0324: memx_func_next */
212 0x00000001,
213 0x00000000,
214 0x00000415,
215 0x00000002,
216 0x00000002,
217 0x00000430,
218 0x00040003,
219 0x00000000,
220 0x00000458,
221 0x00010004,
222 0x00000000,
223 0x00000472,
224/* 0x0354: memx_func_tail */
225/* 0x0354: memx_data_head */
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000,
237 0x00000000,
238 0x00000000,
239 0x00000000,
240 0x00000000,
241 0x00000000,
242 0x00000000,
243 0x00000000,
244 0x00000000,
245 0x00000000,
246 0x00000000,
247 0x00000000,
248 0x00000000,
249 0x00000000,
250 0x00000000,
251 0x00000000,
252 0x00000000,
253 0x00000000,
254 0x00000000,
255 0x00000000,
256 0x00000000,
257 0x00000000,
258 0x00000000,
259 0x00000000,
260 0x00000000,
261 0x00000000,
262 0x00000000,
263 0x00000000,
264 0x00000000,
265 0x00000000,
266 0x00000000,
267 0x00000000,
268 0x00000000,
269 0x00000000,
270 0x00000000,
271 0x00000000,
272 0x00000000,
273 0x00000000,
274 0x00000000,
275 0x00000000,
276 0x00000000,
277 0x00000000,
278 0x00000000,
279 0x00000000,
280 0x00000000,
281 0x00000000,
282 0x00000000,
283 0x00000000,
284 0x00000000,
285 0x00000000,
286 0x00000000,
287 0x00000000,
288 0x00000000,
289 0x00000000,
290 0x00000000,
291 0x00000000,
292 0x00000000,
293 0x00000000,
294 0x00000000,
295 0x00000000,
296 0x00000000,
297 0x00000000,
298 0x00000000,
299 0x00000000,
300 0x00000000,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0x00000000,
305 0x00000000,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0x00000000,
311 0x00000000,
312 0x00000000,
313 0x00000000,
314 0x00000000,
315 0x00000000,
316 0x00000000,
317 0x00000000,
318 0x00000000,
319 0x00000000,
320 0x00000000,
321 0x00000000,
322 0x00000000,
323 0x00000000,
324 0x00000000,
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000,
332 0x00000000,
333 0x00000000,
334 0x00000000,
335 0x00000000,
336 0x00000000,
337 0x00000000,
338 0x00000000,
339 0x00000000,
340 0x00000000,
341 0x00000000,
342 0x00000000,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 0x00000000,
347 0x00000000,
348 0x00000000,
349 0x00000000,
350 0x00000000,
351 0x00000000,
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000,
360 0x00000000,
361 0x00000000,
362 0x00000000,
363 0x00000000,
364 0x00000000,
365 0x00000000,
366 0x00000000,
367 0x00000000,
368 0x00000000,
369 0x00000000,
370 0x00000000,
371 0x00000000,
372 0x00000000,
373 0x00000000,
374 0x00000000,
375 0x00000000,
376 0x00000000,
377 0x00000000,
378 0x00000000,
379 0x00000000,
380 0x00000000,
381 0x00000000,
382 0x00000000,
383 0x00000000,
384 0x00000000,
385 0x00000000,
386 0x00000000,
387 0x00000000,
388 0x00000000,
389 0x00000000,
390 0x00000000,
391 0x00000000,
392 0x00000000,
393 0x00000000,
394 0x00000000,
395 0x00000000,
396 0x00000000,
397 0x00000000,
398 0x00000000,
399 0x00000000,
400 0x00000000,
401 0x00000000,
402 0x00000000,
403 0x00000000,
404 0x00000000,
405 0x00000000,
406 0x00000000,
407 0x00000000,
408 0x00000000,
409 0x00000000,
410 0x00000000,
411 0x00000000,
412 0x00000000,
413 0x00000000,
414 0x00000000,
415 0x00000000,
416 0x00000000,
417 0x00000000,
418 0x00000000,
419 0x00000000,
420 0x00000000,
421 0x00000000,
422 0x00000000,
423 0x00000000,
424 0x00000000,
425 0x00000000,
426 0x00000000,
427 0x00000000,
428 0x00000000,
429 0x00000000,
430 0x00000000,
431 0x00000000,
432 0x00000000,
433 0x00000000,
434 0x00000000,
435 0x00000000,
436 0x00000000,
437 0x00000000,
438 0x00000000,
439 0x00000000,
440 0x00000000,
441 0x00000000,
442 0x00000000,
443 0x00000000,
444 0x00000000,
445 0x00000000,
446 0x00000000,
447 0x00000000,
448 0x00000000,
449 0x00000000,
450 0x00000000,
451 0x00000000,
452 0x00000000,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x00000000,
466 0x00000000,
467 0x00000000,
468 0x00000000,
469 0x00000000,
470 0x00000000,
471 0x00000000,
472 0x00000000,
473 0x00000000,
474 0x00000000,
475 0x00000000,
476 0x00000000,
477 0x00000000,
478 0x00000000,
479 0x00000000,
480 0x00000000,
481 0x00000000,
482 0x00000000,
483 0x00000000,
484 0x00000000,
485 0x00000000,
486 0x00000000,
487 0x00000000,
488 0x00000000,
489 0x00000000,
490 0x00000000,
491 0x00000000,
492 0x00000000,
493 0x00000000,
494 0x00000000,
495 0x00000000,
496 0x00000000,
497 0x00000000,
498 0x00000000,
499 0x00000000,
500 0x00000000,
501 0x00000000,
502 0x00000000,
503 0x00000000,
504 0x00000000,
505 0x00000000,
506 0x00000000,
507 0x00000000,
508 0x00000000,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000,
514 0x00000000,
515 0x00000000,
516 0x00000000,
517 0x00000000,
518 0x00000000,
519 0x00000000,
520 0x00000000,
521 0x00000000,
522 0x00000000,
523 0x00000000,
524 0x00000000,
525 0x00000000,
526 0x00000000,
527 0x00000000,
528 0x00000000,
529 0x00000000,
530 0x00000000,
531 0x00000000,
532 0x00000000,
533 0x00000000,
534 0x00000000,
535 0x00000000,
536 0x00000000,
537 0x00000000,
538 0x00000000,
539 0x00000000,
540 0x00000000,
541 0x00000000,
542 0x00000000,
543 0x00000000,
544 0x00000000,
545 0x00000000,
546 0x00000000,
547 0x00000000,
548 0x00000000,
549 0x00000000,
550 0x00000000,
551 0x00000000,
552 0x00000000,
553 0x00000000,
554 0x00000000,
555 0x00000000,
556 0x00000000,
557 0x00000000,
558 0x00000000,
559 0x00000000,
560 0x00000000,
561 0x00000000,
562 0x00000000,
563 0x00000000,
564 0x00000000,
565 0x00000000,
566 0x00000000,
567 0x00000000,
568 0x00000000,
569 0x00000000,
570 0x00000000,
571 0x00000000,
572 0x00000000,
573 0x00000000,
574 0x00000000,
575 0x00000000,
576 0x00000000,
577 0x00000000,
578 0x00000000,
579 0x00000000,
580 0x00000000,
581 0x00000000,
582 0x00000000,
583 0x00000000,
584 0x00000000,
585 0x00000000,
586 0x00000000,
587 0x00000000,
588 0x00000000,
589 0x00000000,
590 0x00000000,
591 0x00000000,
592 0x00000000,
593 0x00000000,
594 0x00000000,
595 0x00000000,
596 0x00000000,
597 0x00000000,
598 0x00000000,
599 0x00000000,
600 0x00000000,
601 0x00000000,
602 0x00000000,
603 0x00000000,
604 0x00000000,
605 0x00000000,
606 0x00000000,
607 0x00000000,
608 0x00000000,
609 0x00000000,
610 0x00000000,
611 0x00000000,
612 0x00000000,
613 0x00000000,
614 0x00000000,
615 0x00000000,
616 0x00000000,
617 0x00000000,
618 0x00000000,
619 0x00000000,
620 0x00000000,
621 0x00000000,
622 0x00000000,
623 0x00000000,
624 0x00000000,
625 0x00000000,
626 0x00000000,
627 0x00000000,
628 0x00000000,
629 0x00000000,
630 0x00000000,
631 0x00000000,
632 0x00000000,
633 0x00000000,
634 0x00000000,
635 0x00000000,
636 0x00000000,
637 0x00000000,
638 0x00000000,
639 0x00000000,
640 0x00000000,
641 0x00000000,
642 0x00000000,
643 0x00000000,
644 0x00000000,
645 0x00000000,
646 0x00000000,
647 0x00000000,
648 0x00000000,
649 0x00000000,
650 0x00000000,
651 0x00000000,
652 0x00000000,
653 0x00000000,
654 0x00000000,
655 0x00000000,
656 0x00000000,
657 0x00000000,
658 0x00000000,
659 0x00000000,
660 0x00000000,
661 0x00000000,
662 0x00000000,
663 0x00000000,
664 0x00000000,
665 0x00000000,
666 0x00000000,
667 0x00000000,
668 0x00000000,
669 0x00000000,
670 0x00000000,
671 0x00000000,
672 0x00000000,
673 0x00000000,
674 0x00000000,
675 0x00000000,
676 0x00000000,
677 0x00000000,
678 0x00000000,
679 0x00000000,
680 0x00000000,
681 0x00000000,
682 0x00000000,
683 0x00000000,
684 0x00000000,
685 0x00000000,
686 0x00000000,
687 0x00000000,
688 0x00000000,
689 0x00000000,
690 0x00000000,
691 0x00000000,
692 0x00000000,
693 0x00000000,
694 0x00000000,
695 0x00000000,
696 0x00000000,
697 0x00000000,
698 0x00000000,
699 0x00000000,
700 0x00000000,
701 0x00000000,
702 0x00000000,
703 0x00000000,
704 0x00000000,
705 0x00000000,
706 0x00000000,
707 0x00000000,
708 0x00000000,
709 0x00000000,
710 0x00000000,
711 0x00000000,
712 0x00000000,
713 0x00000000,
714 0x00000000,
715 0x00000000,
716 0x00000000,
717 0x00000000,
718 0x00000000,
719 0x00000000,
720 0x00000000,
721 0x00000000,
722 0x00000000,
723 0x00000000,
724 0x00000000,
725 0x00000000,
726 0x00000000,
727 0x00000000,
728 0x00000000,
729 0x00000000,
730 0x00000000,
731 0x00000000,
732 0x00000000,
733 0x00000000,
734 0x00000000,
735 0x00000000,
736 0x00000000,
737 0x00000000,
738/* 0x0b54: memx_data_tail */
739 0x00000000,
740 0x00000000,
741 0x00000000,
742 0x00000000,
743 0x00000000,
744 0x00000000,
745 0x00000000,
746 0x00000000,
747 0x00000000,
748 0x00000000,
749 0x00000000,
750 0x00000000,
751 0x00000000,
752 0x00000000,
753 0x00000000,
754 0x00000000,
755 0x00000000,
756 0x00000000,
757 0x00000000,
758 0x00000000,
759 0x00000000,
760 0x00000000,
761 0x00000000,
762 0x00000000,
763 0x00000000,
764 0x00000000,
765 0x00000000,
766 0x00000000,
767 0x00000000,
768 0x00000000,
769 0x00000000,
770 0x00000000,
771 0x00000000,
772 0x00000000,
773 0x00000000,
774 0x00000000,
775 0x00000000,
776 0x00000000,
777 0x00000000,
778 0x00000000,
779 0x00000000,
780 0x00000000,
781 0x00000000,
782};
783
784uint32_t nvd0_pwr_code[] = {
785 0x02bf0ef5,
786/* 0x0004: rd32 */
787 0x07a007f1,
788 0xbd000ed0,
789 0x01e7f004,
790 0xf101e3f0,
791 0xd007ac07,
792 0x04bd000e,
793/* 0x001c: rd32_wait */
794 0x07ace7f1,
795 0xf100eecf,
796 0xf47000e4,
797 0xd7f1f51b,
798 0xddcf07a4,
799/* 0x0033: wr32 */
800 0xf100f800,
801 0xd007a007,
802 0x04bd000e,
803 0x07a407f1,
804 0xbd000dd0,
805 0x02e7f004,
806 0xf0f0e5f0,
807 0x07f101e3,
808 0x0ed007ac,
809/* 0x0057: wr32_wait */
810 0xf104bd00,
811 0xcf07ace7,
812 0xe4f100ee,
813 0x1bf47000,
814/* 0x0067: nsec */
815 0xf000f8f5,
816 0x88cf2c87,
817/* 0x006d: nsec_loop */
818 0x2c97f000,
819 0xbb0099cf,
820 0x9eb80298,
821 0xf41ef406,
822/* 0x007e: wait */
823 0x87f000f8,
824 0x0088cf2c,
825/* 0x0084: wait_loop */
826 0xf402eeb9,
827 0xdab90421,
828 0x04adfd02,
829 0xf406acb8,
830 0x97f0120b,
831 0x0099cf2c,
832 0xb80298bb,
833 0x1ef4069b,
834/* 0x00a5: wait_done */
835/* 0x00a7: intr_watchdog */
836 0x9800f8e2,
837 0x96b003e9,
838 0x2a0bf400,
839 0xbb840a98,
840 0x1cf4029a,
841 0x01d7f00f,
842 0x020621f5,
843 0x0ef494bd,
844/* 0x00c5: intr_watchdog_next_time */
845 0x850a9815,
846 0xf400a6b0,
847 0x9ab8090b,
848 0x061cf406,
849/* 0x00d4: intr_watchdog_next_time_set */
850/* 0x00d7: intr_watchdog_next_proc */
851 0x80850980,
852 0xe0b603e9,
853 0x10e6b158,
854 0xc61bf402,
855/* 0x00e6: intr */
856 0x00f900f8,
857 0x80f904bd,
858 0xa0f990f9,
859 0xc0f9b0f9,
860 0xe0f9d0f9,
861 0xf7f0f0f9,
862 0x0188fe00,
863 0x87f180f9,
864 0x88cf05d0,
865 0x0180b600,
866 0x05d007f1,
867 0xbd0008d0,
868 0x0887f004,
869 0xc40088cf,
870 0x0bf40289,
871 0x85008020,
872 0xf458e7f0,
873 0x0998a721,
874 0x0096b085,
875 0xf00e0bf4,
876 0x09d03407,
877 0x8004bd00,
878/* 0x013e: intr_skip_watchdog */
879 0x89e48409,
880 0x0bf40800,
881 0x8897f13c,
882 0x0099cf06,
883 0xf4029ac4,
884 0xc7f1260b,
885 0xcccf04c0,
886 0xf1c0f900,
887 0xf14f48e7,
888 0xf05453e3,
889 0x21f500d7,
890 0xc0fc026b,
891 0x04c007f1,
892 0xbd000cd0,
893/* 0x0175: intr_subintr_skip_fifo */
894 0x8807f104,
895 0x0009d006,
896/* 0x017e: intr_skip_subintr */
897 0x89c404bd,
898 0x070bf420,
899 0xffbfa4f1,
900/* 0x0188: intr_skip_pause */
901 0xf44089c4,
902 0xa4f1070b,
903/* 0x0192: intr_skip_user0 */
904 0x07f0ffbf,
905 0x0008d004,
906 0x80fc04bd,
907 0xfc0088fe,
908 0xfce0fcf0,
909 0xfcc0fcd0,
910 0xfca0fcb0,
911 0xfc80fc90,
912 0x0032f400,
913/* 0x01b6: timer */
914 0x32f401f8,
915 0x03f89810,
916 0xf40086b0,
917 0xfe80421c,
918 0x3807f003,
919 0xbd0008d0,
920 0x0887f004,
921 0xf00088cf,
922 0x1bf40284,
923 0x3487f020,
924 0xb80088cf,
925 0x0bf406e0,
926 0x06e8b809,
927/* 0x01eb: timer_reset */
928 0xf0191ef4,
929 0x0ed03407,
930 0x8004bd00,
931/* 0x01f6: timer_enable */
932 0x87f0840e,
933 0x3807f001,
934 0xbd0008d0,
935/* 0x0201: timer_done */
936 0x1031f404,
937/* 0x0206: send_proc */
938 0x80f900f8,
939 0xe89890f9,
940 0x04e99805,
941 0xb80486f0,
942 0x0bf40689,
943 0x0398c42a,
944 0xb6048894,
945 0x8ebb1880,
946 0x00fa9800,
947 0x80008a80,
948 0x8c80018d,
949 0x038b8002,
950 0xf00190b6,
951 0xe9800794,
952 0x0231f404,
953/* 0x0240: send_done */
954 0x80fc90fc,
955/* 0x0246: find */
956 0x80f900f8,
957 0xf45887f0,
958/* 0x024e: find_loop */
959 0x8a980131,
960 0x06aeb800,
961 0xb6100bf4,
962 0x86b15880,
963 0x1bf40210,
964 0x0132f4f0,
965/* 0x0264: find_done */
966 0xfc028eb9,
967/* 0x026b: send */
968 0xf500f880,
969 0xf4024621,
970 0x00f89701,
971/* 0x0274: recv */
972 0x9805e898,
973 0x32f404e9,
974 0x0689b801,
975 0xc43d0bf4,
976 0x80b60389,
977 0x0784f001,
978 0x9805e880,
979 0xf0f902ea,
980 0xf9018ffe,
981 0x02efb9f0,
982 0xbb049994,
983 0xe0b600e9,
984 0x03eb9818,
985 0x9802ec98,
986 0xee9801ed,
987 0xfca5f900,
988 0x00f8fef0,
989 0xfc0131f4,
990/* 0x02bd: recv_done */
991/* 0x02bf: init */
992 0xf100f8f0,
993 0xcf010817,
994 0x11e70011,
995 0x14b60109,
996 0x0014fe08,
997 0x00e017f1,
998 0xf00013f0,
999 0x01d01c07,
1000 0xf004bd00,
1001 0x07f0ff17,
1002 0x0001d014,
1003 0x17f004bd,
1004 0x0015f102,
1005 0x1007f008,
1006 0xbd0001d0,
1007 0xe617f104,
1008 0x0013f000,
1009 0xf40010fe,
1010 0x17f01031,
1011 0x3807f001,
1012 0xbd0001d0,
1013 0x58f7f004,
1014/* 0x0314: init_proc */
1015 0xb001f198,
1016 0x0bf40016,
1017 0xb615f9fa,
1018 0x0ef458f0,
1019/* 0x0325: host_send */
1020 0xb017f1f2,
1021 0x0011cf04,
1022 0x04a027f1,
1023 0xb80022cf,
1024 0x0bf40612,
1025 0x071ec42f,
1026 0xb704ee94,
1027 0x980218e0,
1028 0xec9803eb,
1029 0x01ed9802,
1030 0xf500ee98,
1031 0xb6026b21,
1032 0x1ec40110,
1033 0xb007f10f,
1034 0x0001d004,
1035 0x0ef404bd,
1036/* 0x0365: host_send_done */
1037/* 0x0367: host_recv */
1038 0xf100f8c3,
1039 0xf14e4917,
1040 0xb8525413,
1041 0x0bf406e1,
1042/* 0x0375: host_recv_wait */
1043 0xcc17f1b3,
1044 0x0011cf04,
1045 0x04c827f1,
1046 0xf00022cf,
1047 0x12b80816,
1048 0xec0bf406,
1049 0xb60723c4,
1050 0x30b70434,
1051 0x3b800298,
1052 0x023c8003,
1053 0x80013d80,
1054 0x20b6003e,
1055 0x0f24f001,
1056 0x04c807f1,
1057 0xbd0002d0,
1058 0x4027f004,
1059 0xd00007f0,
1060 0x04bd0002,
1061/* 0x03be: host_init */
1062 0x17f100f8,
1063 0x14b60080,
1064 0x1815f110,
1065 0xd007f102,
1066 0x0001d004,
1067 0x17f104bd,
1068 0x14b60080,
1069 0x9815f110,
1070 0xdc07f102,
1071 0x0001d004,
1072 0x17f004bd,
1073 0xc407f101,
1074 0x0001d004,
1075 0x00f804bd,
1076/* 0x03f4: memx_func_enter */
1077 0xf10467f0,
1078 0xd007e007,
1079 0x04bd0006,
1080/* 0x0400: memx_func_enter_wait */
1081 0x07c067f1,
1082 0xf00066cf,
1083 0x0bf40464,
1084 0x001698f6,
1085 0xf80410b6,
1086/* 0x0415: memx_func_leave */
1087 0x0467f000,
1088 0x07e407f1,
1089 0xbd0006d0,
1090/* 0x0421: memx_func_leave_wait */
1091 0xc067f104,
1092 0x0066cf07,
1093 0xf40464f0,
1094 0x00f8f61b,
1095/* 0x0430: memx_func_wr32 */
1096 0x98001698,
1097 0x10b60115,
1098 0xf960f908,
1099 0xfcd0fc50,
1100 0x3321f4e0,
1101 0x140003f1,
1102 0x800506fd,
1103 0x04bd0005,
1104 0xf40242b6,
1105 0x00f8dd1b,
1106/* 0x0458: memx_func_wait */
1107 0xcf2c87f0,
1108 0x1e980088,
1109 0x011d9800,
1110 0x98021c98,
1111 0x10b6031b,
1112 0x7e21f410,
1113/* 0x0472: memx_func_delay */
1114 0x1e9800f8,
1115 0x0410b600,
1116 0xf86721f4,
1117/* 0x047d: memx_exec */
1118 0xf9e0f900,
1119 0x02c1b9d0,
1120/* 0x0487: memx_exec_next */
1121 0x9802b2b9,
1122 0x10b60013,
1123 0x10349504,
1124 0x980c30f0,
1125 0x55f9c835,
1126 0xf40612b8,
1127 0xd0fcec1e,
1128 0x21f5e0fc,
1129 0x00f8026b,
1130/* 0x04a8: memx_info */
1131 0x0354c7f1,
1132 0x0800b7f1,
1133 0x026b21f5,
1134/* 0x04b6: memx_recv */
1135 0xd6b000f8,
1136 0xc40bf401,
1137 0xf400d6b0,
1138 0x00f8e90b,
1139/* 0x04c4: memx_init */
1140/* 0x04c6: perf_recv */
1141 0x00f800f8,
1142/* 0x04c8: perf_init */
1143/* 0x04ca: test_recv */
1144 0x17f100f8,
1145 0x11cf05d8,
1146 0x0110b600,
1147 0x05d807f1,
1148 0xbd0001d0,
1149 0x00e7f104,
1150 0x4fe3f1d9,
1151 0xb621f513,
1152/* 0x04eb: test_init */
1153 0xf100f801,
1154 0xf50800e7,
1155 0xf801b621,
1156/* 0x04f5: idle_recv */
1157/* 0x04f7: idle */
1158 0xf400f800,
1159 0x17f10031,
1160 0x11cf05d4,
1161 0x0110b600,
1162 0x05d407f1,
1163 0xbd0001d0,
1164/* 0x050d: idle_loop */
1165 0x5817f004,
1166/* 0x0513: idle_proc */
1167/* 0x0513: idle_proc_exec */
1168 0xf90232f4,
1169 0x021eb910,
1170 0x027421f5,
1171 0x11f410fc,
1172 0x0231f409,
1173/* 0x0527: idle_proc_next */
1174 0xb6ef0ef4,
1175 0x1fb85810,
1176 0xe61bf406,
1177 0xf4dd02f4,
1178 0x0ef40028,
1179 0x000000c1,
1180 0x00000000,
1181 0x00000000,
1182 0x00000000,
1183 0x00000000,
1184 0x00000000,
1185 0x00000000,
1186 0x00000000,
1187 0x00000000,
1188 0x00000000,
1189 0x00000000,
1190 0x00000000,
1191 0x00000000,
1192 0x00000000,
1193 0x00000000,
1194 0x00000000,
1195 0x00000000,
1196 0x00000000,
1197 0x00000000,
1198 0x00000000,
1199 0x00000000,
1200 0x00000000,
1201 0x00000000,
1202 0x00000000,
1203 0x00000000,
1204 0x00000000,
1205 0x00000000,
1206 0x00000000,
1207 0x00000000,
1208 0x00000000,
1209 0x00000000,
1210 0x00000000,
1211 0x00000000,
1212 0x00000000,
1213 0x00000000,
1214 0x00000000,
1215 0x00000000,
1216 0x00000000,
1217 0x00000000,
1218 0x00000000,
1219 0x00000000,
1220 0x00000000,
1221 0x00000000,
1222 0x00000000,
1223 0x00000000,
1224 0x00000000,
1225 0x00000000,
1226 0x00000000,
1227 0x00000000,
1228 0x00000000,
1229};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
new file mode 100644
index 000000000000..5fb0cccc6c64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
@@ -0,0 +1,27 @@
1#ifndef __NVKM_PWR_OS_H__
2#define __NVKM_PWR_OS_H__
3
4/* Process names */
5#define PROC_KERN 0x52544e49
6#define PROC_IDLE 0x454c4449
7#define PROC_HOST 0x54534f48
8#define PROC_MEMX 0x584d454d
9#define PROC_PERF 0x46524550
10#define PROC_TEST 0x54534554
11
12/* KERN: message identifiers */
13#define KMSG_FIFO 0x00000000
14#define KMSG_ALARM 0x00000001
15
16/* MEMX: message identifiers */
17#define MEMX_MSG_INFO 0
18#define MEMX_MSG_EXEC 1
19
20/* MEMX: script opcode definitions */
21#define MEMX_ENTER 0
22#define MEMX_LEAVE 1
23#define MEMX_WR32 2
24#define MEMX_WAIT 3
25#define MEMX_DELAY 4
26
27#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc
new file mode 100644
index 000000000000..38eadf705cbf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc
@@ -0,0 +1,57 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifdef INCLUDE_PROC
26process(PROC_PERF, #perf_init, #perf_recv)
27#endif
28
29/******************************************************************************
30 * PERF data segment
31 *****************************************************************************/
32#ifdef INCLUDE_DATA
33#endif
34
35/******************************************************************************
36 * PERF code segment
37 *****************************************************************************/
38#ifdef INCLUDE_CODE
39
40// description
41//
42// $r15 - current (perf)
43// $r14 - sender process name
44// $r13 - message
45// $r12 - data0
46// $r11 - data1
47// $r0 - zero
48perf_recv:
49 ret
50
51// description
52//
53// $r15 - current (perf)
54// $r0 - zero
55perf_init:
56 ret
57#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc
new file mode 100644
index 000000000000..0c3a71bf5459
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc
@@ -0,0 +1,64 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifdef INCLUDE_PROC
26process(PROC_TEST, #test_init, #test_recv)
27#endif
28
29/******************************************************************************
30 * TEST data segment
31 *****************************************************************************/
32#ifdef INCLUDE_DATA
33#endif
34
35/******************************************************************************
36 * TEST code segment
37 *****************************************************************************/
38#ifdef INCLUDE_CODE
39// description
40//
41// $r15 - current (test)
42// $r14 - sender process name
43// $r13 - message
44// $r12 - data0
45// $r11 - data1
46// $r0 - zero
47test_recv:
48 nv_iord($r1, NV_PPWR_DSCRATCH(2))
49 add b32 $r1 1
50 nv_iowr(NV_PPWR_DSCRATCH(2), $r1)
51 mov $r14 -0x2700 /* 0xd900, envyas grrr! */
52 sethi $r14 0x134f0000
53 call(timer)
54 ret
55
56// description
57//
58// $r15 - current (test)
59// $r0 - zero
60test_init:
61 mov $r14 0x800
62 call(timer)
63 ret
64#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
new file mode 100644
index 000000000000..03de3107d29f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
@@ -0,0 +1,121 @@
1#ifndef __NVKM_PWR_MEMX_H__
2#define __NVKM_PWR_MEMX_H__
3
4#include <subdev/pwr.h>
5#include <subdev/pwr/fuc/os.h>
6
7struct nouveau_memx {
8 struct nouveau_pwr *ppwr;
9 u32 base;
10 u32 size;
11 struct {
12 u32 mthd;
13 u32 size;
14 u32 data[64];
15 } c;
16};
17
18static void
19memx_out(struct nouveau_memx *memx)
20{
21 struct nouveau_pwr *ppwr = memx->ppwr;
22 int i;
23
24 if (memx->c.size) {
25 nv_wr32(ppwr, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
26 for (i = 0; i < memx->c.size; i++)
27 nv_wr32(ppwr, 0x10a1c4, memx->c.data[i]);
28 memx->c.size = 0;
29 }
30}
31
32static void
33memx_cmd(struct nouveau_memx *memx, u32 mthd, u32 size, u32 data[])
34{
35 if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
36 (memx->c.size && memx->c.mthd != mthd))
37 memx_out(memx);
38 memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
39 memx->c.size += size;
40 memx->c.mthd = mthd;
41}
42
43int
44nouveau_memx_init(struct nouveau_pwr *ppwr, struct nouveau_memx **pmemx)
45{
46 struct nouveau_memx *memx;
47 u32 reply[2];
48 int ret;
49
50 ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO, 0, 0);
51 if (ret)
52 return ret;
53
54 memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
55 if (!memx)
56 return -ENOMEM;
57 memx->ppwr = ppwr;
58 memx->base = reply[0];
59 memx->size = reply[1];
60
61 /* acquire data segment access */
62 do {
63 nv_wr32(ppwr, 0x10a580, 0x00000003);
64 } while (nv_rd32(ppwr, 0x10a580) != 0x00000003);
65 nv_wr32(ppwr, 0x10a1c0, 0x01000000 | memx->base);
66 nv_wr32(ppwr, 0x10a1c4, 0x00010000 | MEMX_ENTER);
67 nv_wr32(ppwr, 0x10a1c4, 0x00000000);
68 return 0;
69}
70
71int
72nouveau_memx_fini(struct nouveau_memx **pmemx, bool exec)
73{
74 struct nouveau_memx *memx = *pmemx;
75 struct nouveau_pwr *ppwr = memx->ppwr;
76 u32 finish, reply[2];
77
78 /* flush the cache... */
79 memx_out(memx);
80
81 /* release data segment access */
82 nv_wr32(ppwr, 0x10a1c4, 0x00000000 | MEMX_LEAVE);
83 finish = nv_rd32(ppwr, 0x10a1c0) & 0x00ffffff;
84 nv_wr32(ppwr, 0x10a580, 0x00000000);
85
86 /* call MEMX process to execute the script, and wait for reply */
87 if (exec) {
88 ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_EXEC,
89 memx->base, finish);
90 }
91
92 kfree(memx);
93 return 0;
94}
95
96void
97nouveau_memx_wr32(struct nouveau_memx *memx, u32 addr, u32 data)
98{
99 nv_debug(memx->ppwr, "R[%06x] = 0x%08x\n", addr, data);
100 memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
101}
102
103void
104nouveau_memx_wait(struct nouveau_memx *memx,
105 u32 addr, u32 mask, u32 data, u32 nsec)
106{
107 nv_debug(memx->ppwr, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
108 addr, mask, data, nsec);
109 memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, ~mask, data, nsec });
110 memx_out(memx); /* fuc can't handle multiple */
111}
112
113void
114nouveau_memx_nsec(struct nouveau_memx *memx, u32 nsec)
115{
116 nv_debug(memx->ppwr, " DELAY = %d ns\n", nsec);
117 memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
118 memx_out(memx); /* fuc can't handle multiple */
119}
120
121#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
new file mode 100644
index 000000000000..52c85414866a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/pwr.h>
26
27#include "fuc/nv108.fuc.h"
28
29struct nv108_pwr_priv {
30 struct nouveau_pwr base;
31};
32
33static int
34nv108_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nv108_pwr_priv *priv;
39 int ret;
40
41 ret = nouveau_pwr_create(parent, engine, oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.code.data = nv108_pwr_code;
47 priv->base.code.size = sizeof(nv108_pwr_code);
48 priv->base.data.data = nv108_pwr_data;
49 priv->base.data.size = sizeof(nv108_pwr_data);
50 return 0;
51}
52
53struct nouveau_oclass
54nv108_pwr_oclass = {
55 .handle = NV_SUBDEV(PWR, 0x00),
56 .ofuncs = &(struct nouveau_ofuncs) {
57 .ctor = nv108_pwr_ctor,
58 .dtor = _nouveau_pwr_dtor,
59 .init = _nouveau_pwr_init,
60 .fini = _nouveau_pwr_fini,
61 },
62};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
new file mode 100644
index 000000000000..c132b7ca9747
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/pwr.h>
26
27#include "fuc/nva3.fuc.h"
28
29struct nva3_pwr_priv {
30 struct nouveau_pwr base;
31};
32
33static int
34nva3_pwr_init(struct nouveau_object *object)
35{
36 struct nva3_pwr_priv *priv = (void *)object;
37 nv_mask(priv, 0x022210, 0x00000001, 0x00000000);
38 nv_mask(priv, 0x022210, 0x00000001, 0x00000001);
39 return nouveau_pwr_init(&priv->base);
40}
41
42static int
43nva3_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
44 struct nouveau_oclass *oclass, void *data, u32 size,
45 struct nouveau_object **pobject)
46{
47 struct nva3_pwr_priv *priv;
48 int ret;
49
50 ret = nouveau_pwr_create(parent, engine, oclass, &priv);
51 *pobject = nv_object(priv);
52 if (ret)
53 return ret;
54
55 priv->base.code.data = nva3_pwr_code;
56 priv->base.code.size = sizeof(nva3_pwr_code);
57 priv->base.data.data = nva3_pwr_data;
58 priv->base.data.size = sizeof(nva3_pwr_data);
59 return 0;
60}
61
62struct nouveau_oclass
63nva3_pwr_oclass = {
64 .handle = NV_SUBDEV(PWR, 0xa3),
65 .ofuncs = &(struct nouveau_ofuncs) {
66 .ctor = nva3_pwr_ctor,
67 .dtor = _nouveau_pwr_dtor,
68 .init = nva3_pwr_init,
69 .fini = _nouveau_pwr_fini,
70 },
71};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
new file mode 100644
index 000000000000..495f6857428d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/pwr.h>
26
27#include "fuc/nvc0.fuc.h"
28
29struct nvc0_pwr_priv {
30 struct nouveau_pwr base;
31};
32
33static int
34nvc0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nvc0_pwr_priv *priv;
39 int ret;
40
41 ret = nouveau_pwr_create(parent, engine, oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.code.data = nvc0_pwr_code;
47 priv->base.code.size = sizeof(nvc0_pwr_code);
48 priv->base.data.data = nvc0_pwr_data;
49 priv->base.data.size = sizeof(nvc0_pwr_data);
50 return 0;
51}
52
53struct nouveau_oclass
54nvc0_pwr_oclass = {
55 .handle = NV_SUBDEV(PWR, 0xc0),
56 .ofuncs = &(struct nouveau_ofuncs) {
57 .ctor = nvc0_pwr_ctor,
58 .dtor = _nouveau_pwr_dtor,
59 .init = _nouveau_pwr_init,
60 .fini = _nouveau_pwr_fini,
61 },
62};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
new file mode 100644
index 000000000000..043aa142fe82
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/pwr.h>
26
27#include "fuc/nvd0.fuc.h"
28
29struct nvd0_pwr_priv {
30 struct nouveau_pwr base;
31};
32
33static int
34nvd0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nvd0_pwr_priv *priv;
39 int ret;
40
41 ret = nouveau_pwr_create(parent, engine, oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.code.data = nvd0_pwr_code;
47 priv->base.code.size = sizeof(nvd0_pwr_code);
48 priv->base.data.data = nvd0_pwr_data;
49 priv->base.data.size = sizeof(nvd0_pwr_data);
50 return 0;
51}
52
53struct nouveau_oclass
54nvd0_pwr_oclass = {
55 .handle = NV_SUBDEV(PWR, 0xd0),
56 .ofuncs = &(struct nouveau_ofuncs) {
57 .ctor = nvd0_pwr_ctor,
58 .dtor = _nouveau_pwr_dtor,
59 .init = _nouveau_pwr_init,
60 .fini = _nouveau_pwr_fini,
61 },
62};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index f1de7a9c572b..21b2b3021fad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -92,10 +92,11 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
92 struct nouveau_timer *ptimer = nouveau_timer(therm); 92 struct nouveau_timer *ptimer = nouveau_timer(therm);
93 struct nouveau_therm_priv *priv = (void *)therm; 93 struct nouveau_therm_priv *priv = (void *)therm;
94 unsigned long flags; 94 unsigned long flags;
95 int duty; 95 bool immd = true;
96 bool poll = true;
97 int duty = -1;
96 98
97 spin_lock_irqsave(&priv->lock, flags); 99 spin_lock_irqsave(&priv->lock, flags);
98 nv_debug(therm, "FAN speed check\n");
99 if (mode < 0) 100 if (mode < 0)
100 mode = priv->mode; 101 mode = priv->mode;
101 priv->mode = mode; 102 priv->mode = mode;
@@ -106,28 +107,48 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
106 duty = nouveau_therm_fan_get(therm); 107 duty = nouveau_therm_fan_get(therm);
107 if (duty < 0) 108 if (duty < 0)
108 duty = 100; 109 duty = 100;
110 poll = false;
109 break; 111 break;
110 case NOUVEAU_THERM_CTRL_AUTO: 112 case NOUVEAU_THERM_CTRL_AUTO:
111 if (priv->fan->bios.nr_fan_trip) 113 if (priv->fan->bios.nr_fan_trip) {
112 duty = nouveau_therm_update_trip(therm); 114 duty = nouveau_therm_update_trip(therm);
113 else 115 } else
116 if (priv->fan->bios.linear_min_temp ||
117 priv->fan->bios.linear_max_temp) {
114 duty = nouveau_therm_update_linear(therm); 118 duty = nouveau_therm_update_linear(therm);
119 } else {
120 duty = priv->cstate;
121 poll = false;
122 }
123 immd = false;
115 break; 124 break;
116 case NOUVEAU_THERM_CTRL_NONE: 125 case NOUVEAU_THERM_CTRL_NONE:
117 default: 126 default:
118 ptimer->alarm_cancel(ptimer, &priv->alarm); 127 ptimer->alarm_cancel(ptimer, &priv->alarm);
119 goto done; 128 poll = false;
120 } 129 }
121 130
122 nv_debug(therm, "FAN target request: %d%%\n", duty); 131 if (list_empty(&priv->alarm.head) && poll)
123 nouveau_therm_fan_set(therm, (mode != NOUVEAU_THERM_CTRL_AUTO), duty);
124
125done:
126 if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO))
127 ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm); 132 ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
128 else if (!list_empty(&priv->alarm.head))
129 nv_debug(therm, "therm fan alarm list is not empty\n");
130 spin_unlock_irqrestore(&priv->lock, flags); 133 spin_unlock_irqrestore(&priv->lock, flags);
134
135 if (duty >= 0) {
136 nv_debug(therm, "FAN target request: %d%%\n", duty);
137 nouveau_therm_fan_set(therm, immd, duty);
138 }
139}
140
141int
142nouveau_therm_cstate(struct nouveau_therm *ptherm, int fan, int dir)
143{
144 struct nouveau_therm_priv *priv = (void *)ptherm;
145 if (!dir || (dir < 0 && fan < priv->cstate) ||
146 (dir > 0 && fan > priv->cstate)) {
147 nv_debug(ptherm, "default fan speed -> %d%%\n", fan);
148 priv->cstate = fan;
149 nouveau_therm_update(ptherm, -1);
150 }
151 return 0;
131} 152}
132 153
133static void 154static void
@@ -149,14 +170,15 @@ nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode)
149 "automatic" 170 "automatic"
150 }; 171 };
151 172
152 /* The default PDAEMON ucode interferes with fan management */ 173 /* The default PPWR ucode on fermi interferes with fan management */
153 if ((mode >= ARRAY_SIZE(name)) || 174 if ((mode >= ARRAY_SIZE(name)) ||
154 (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0)) 175 (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0 &&
176 !nouveau_subdev(device, NVDEV_SUBDEV_PWR)))
155 return -EINVAL; 177 return -EINVAL;
156 178
157 /* do not allow automatic fan management if the thermal sensor is 179 /* do not allow automatic fan management if the thermal sensor is
158 * not available */ 180 * not available */
159 if (priv->mode == 2 && therm->temp_get(therm) < 0) 181 if (priv->mode == NOUVEAU_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
160 return -EINVAL; 182 return -EINVAL;
161 183
162 if (priv->mode == mode) 184 if (priv->mode == mode)
@@ -335,7 +357,7 @@ nouveau_therm_preinit(struct nouveau_therm *therm)
335 nouveau_therm_ic_ctor(therm); 357 nouveau_therm_ic_ctor(therm);
336 nouveau_therm_fan_ctor(therm); 358 nouveau_therm_fan_ctor(therm);
337 359
338 nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE); 360 nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
339 nouveau_therm_sensor_preinit(therm); 361 nouveau_therm_sensor_preinit(therm);
340 return 0; 362 return 0;
341} 363}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index 39f47b950ad1..95f6129eeede 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -185,8 +185,11 @@ nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
185 priv->fan->bios.max_duty = 100; 185 priv->fan->bios.max_duty = 100;
186 priv->fan->bios.bump_period = 500; 186 priv->fan->bios.bump_period = 500;
187 priv->fan->bios.slow_down_period = 2000; 187 priv->fan->bios.slow_down_period = 2000;
188/*XXX: talk to mupuf */
189#if 0
188 priv->fan->bios.linear_min_temp = 40; 190 priv->fan->bios.linear_min_temp = 40;
189 priv->fan->bios.linear_max_temp = 85; 191 priv->fan->bios.linear_max_temp = 85;
192#endif
190} 193}
191 194
192static void 195static void
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
index e601773ee475..f69dab11f720 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
@@ -97,6 +97,13 @@ nouveau_fantog_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
97{ 97{
98 struct nouveau_therm_priv *tpriv = (void *)therm; 98 struct nouveau_therm_priv *tpriv = (void *)therm;
99 struct nouveau_fantog_priv *priv; 99 struct nouveau_fantog_priv *priv;
100 int ret;
101
102 if (therm->pwm_ctrl) {
103 ret = therm->pwm_ctrl(therm, func->line, false);
104 if (ret)
105 return ret;
106 }
100 107
101 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 108 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
102 tpriv->fan = &priv->base; 109 tpriv->fan = &priv->base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index 8b3adec5fbb1..13b850076443 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -55,28 +55,28 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
55 return true; 55 return true;
56} 56}
57 57
58static struct i2c_board_info 58static struct nouveau_i2c_board_info
59nv_board_infos[] = { 59nv_board_infos[] = {
60 { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 60 { { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 },
61 { I2C_BOARD_INFO("w83781d", 0x2d) }, 61 { { I2C_BOARD_INFO("w83781d", 0x2d) }, 0 },
62 { I2C_BOARD_INFO("adt7473", 0x2e) }, 62 { { I2C_BOARD_INFO("adt7473", 0x2e) }, 20 },
63 { I2C_BOARD_INFO("adt7473", 0x2d) }, 63 { { I2C_BOARD_INFO("adt7473", 0x2d) }, 20 },
64 { I2C_BOARD_INFO("adt7473", 0x2c) }, 64 { { I2C_BOARD_INFO("adt7473", 0x2c) }, 20 },
65 { I2C_BOARD_INFO("f75375", 0x2e) }, 65 { { I2C_BOARD_INFO("f75375", 0x2e) }, 0 },
66 { I2C_BOARD_INFO("lm99", 0x4c) }, 66 { { I2C_BOARD_INFO("lm99", 0x4c) }, 0 },
67 { I2C_BOARD_INFO("lm90", 0x4c) }, 67 { { I2C_BOARD_INFO("lm90", 0x4c) }, 0 },
68 { I2C_BOARD_INFO("lm90", 0x4d) }, 68 { { I2C_BOARD_INFO("lm90", 0x4d) }, 0 },
69 { I2C_BOARD_INFO("adm1021", 0x18) }, 69 { { I2C_BOARD_INFO("adm1021", 0x18) }, 0 },
70 { I2C_BOARD_INFO("adm1021", 0x19) }, 70 { { I2C_BOARD_INFO("adm1021", 0x19) }, 0 },
71 { I2C_BOARD_INFO("adm1021", 0x1a) }, 71 { { I2C_BOARD_INFO("adm1021", 0x1a) }, 0 },
72 { I2C_BOARD_INFO("adm1021", 0x29) }, 72 { { I2C_BOARD_INFO("adm1021", 0x29) }, 0 },
73 { I2C_BOARD_INFO("adm1021", 0x2a) }, 73 { { I2C_BOARD_INFO("adm1021", 0x2a) }, 0 },
74 { I2C_BOARD_INFO("adm1021", 0x2b) }, 74 { { I2C_BOARD_INFO("adm1021", 0x2b) }, 0 },
75 { I2C_BOARD_INFO("adm1021", 0x4c) }, 75 { { I2C_BOARD_INFO("adm1021", 0x4c) }, 0 },
76 { I2C_BOARD_INFO("adm1021", 0x4d) }, 76 { { I2C_BOARD_INFO("adm1021", 0x4d) }, 0 },
77 { I2C_BOARD_INFO("adm1021", 0x4e) }, 77 { { I2C_BOARD_INFO("adm1021", 0x4e) }, 0 },
78 { I2C_BOARD_INFO("lm63", 0x18) }, 78 { { I2C_BOARD_INFO("lm63", 0x18) }, 0 },
79 { I2C_BOARD_INFO("lm63", 0x4e) }, 79 { { I2C_BOARD_INFO("lm63", 0x4e) }, 0 },
80 { } 80 { }
81}; 81};
82 82
@@ -89,9 +89,9 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
89 struct nvbios_extdev_func extdev_entry; 89 struct nvbios_extdev_func extdev_entry;
90 90
91 if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) { 91 if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
92 struct i2c_board_info board[] = { 92 struct nouveau_i2c_board_info board[] = {
93 { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 93 { { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 0},
94 { } 94 { }
95 }; 95 };
96 96
97 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 97 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
@@ -101,9 +101,9 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
101 } 101 }
102 102
103 if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) { 103 if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
104 struct i2c_board_info board[] = { 104 struct nouveau_i2c_board_info board[] = {
105 { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 105 { { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 20 },
106 { } 106 { }
107 }; 107 };
108 108
109 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 109 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
index 42ba633ccff7..1d15c52fad0c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
@@ -126,7 +126,7 @@ nv84_therm_intr(struct nouveau_subdev *subdev)
126 126
127 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags); 127 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
128 128
129 intr = nv_rd32(therm, 0x20100); 129 intr = nv_rd32(therm, 0x20100) & 0x3ff;
130 130
131 /* THRS_4: downclock */ 131 /* THRS_4: downclock */
132 if (intr & 0x002) { 132 if (intr & 0x002) {
@@ -209,6 +209,19 @@ nv84_therm_ctor(struct nouveau_object *parent,
209 return nouveau_therm_preinit(&priv->base.base); 209 return nouveau_therm_preinit(&priv->base.base);
210} 210}
211 211
212int
213nv84_therm_fini(struct nouveau_object *object, bool suspend)
214{
215 /* Disable PTherm IRQs */
216 nv_wr32(object, 0x20000, 0x00000000);
217
218 /* ACK all PTherm IRQs */
219 nv_wr32(object, 0x20100, 0xffffffff);
220 nv_wr32(object, 0x1100, 0x10000); /* PBUS */
221
222 return _nouveau_therm_fini(object, suspend);
223}
224
212struct nouveau_oclass 225struct nouveau_oclass
213nv84_therm_oclass = { 226nv84_therm_oclass = {
214 .handle = NV_SUBDEV(THERM, 0x84), 227 .handle = NV_SUBDEV(THERM, 0x84),
@@ -216,6 +229,6 @@ nv84_therm_oclass = {
216 .ctor = nv84_therm_ctor, 229 .ctor = nv84_therm_ctor,
217 .dtor = _nouveau_therm_dtor, 230 .dtor = _nouveau_therm_dtor,
218 .init = _nouveau_therm_init, 231 .init = _nouveau_therm_init,
219 .fini = _nouveau_therm_fini, 232 .fini = nv84_therm_fini,
220 }, 233 },
221}; 234};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
index d11a7c400813..3b2c4580098b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -94,6 +94,6 @@ nva3_therm_oclass = {
94 .ctor = nva3_therm_ctor, 94 .ctor = nva3_therm_ctor,
95 .dtor = _nouveau_therm_dtor, 95 .dtor = _nouveau_therm_dtor,
96 .init = nva3_therm_init, 96 .init = nva3_therm_init,
97 .fini = _nouveau_therm_fini, 97 .fini = nv84_therm_fini,
98 }, 98 },
99}; 99};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index 54c28bdc4204..4dd4f81ae873 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -148,6 +148,6 @@ nvd0_therm_oclass = {
148 .ctor = nvd0_therm_ctor, 148 .ctor = nvd0_therm_ctor,
149 .dtor = _nouveau_therm_dtor, 149 .dtor = _nouveau_therm_dtor,
150 .init = nvd0_therm_init, 150 .init = nvd0_therm_init,
151 .fini = _nouveau_therm_fini, 151 .fini = nv84_therm_fini,
152 }, 152 },
153}; 153};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index dd38529262fb..96f8f95693ce 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -76,6 +76,7 @@ struct nouveau_therm_priv {
76 spinlock_t lock; 76 spinlock_t lock;
77 struct nouveau_therm_trip_point *last_trip; 77 struct nouveau_therm_trip_point *last_trip;
78 int mode; 78 int mode;
79 int cstate;
79 int suspend; 80 int suspend;
80 81
81 /* bios */ 82 /* bios */
@@ -144,6 +145,7 @@ int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
144int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32); 145int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
145int nv50_fan_pwm_clock(struct nouveau_therm *); 146int nv50_fan_pwm_clock(struct nouveau_therm *);
146int nv84_temp_get(struct nouveau_therm *therm); 147int nv84_temp_get(struct nouveau_therm *therm);
148int nv84_therm_fini(struct nouveau_object *object, bool suspend);
147 149
148int nva3_therm_fan_sense(struct nouveau_therm *); 150int nva3_therm_fan_sense(struct nouveau_therm *);
149 151
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index b80a33011b93..cfde9eb44ad0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -180,8 +180,6 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
180 180
181 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags); 181 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
182 182
183 nv_debug(therm, "polling the internal temperature\n");
184
185 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost, 183 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
186 NOUVEAU_THERM_THRS_FANBOOST); 184 NOUVEAU_THERM_THRS_FANBOOST);
187 185
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index 57711ecb566c..c0bdd10358d7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -119,16 +119,8 @@ nv04_timer_alarm_cancel(struct nouveau_timer *ptimer,
119{ 119{
120 struct nv04_timer_priv *priv = (void *)ptimer; 120 struct nv04_timer_priv *priv = (void *)ptimer;
121 unsigned long flags; 121 unsigned long flags;
122
123 /* avoid deleting an entry while the alarm intr is running */
124 spin_lock_irqsave(&priv->lock, flags); 122 spin_lock_irqsave(&priv->lock, flags);
125 123 list_del_init(&alarm->head);
126 /* delete the alarm from the list */
127 list_del(&alarm->head);
128
129 /* reset the head so as list_empty returns 1 */
130 INIT_LIST_HEAD(&alarm->head);
131
132 spin_unlock_irqrestore(&priv->lock, flags); 124 spin_unlock_irqrestore(&priv->lock, flags);
133} 125}
134 126
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/base.c b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
new file mode 100644
index 000000000000..32794a999106
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
@@ -0,0 +1,198 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/volt.h>
26
27#include <subdev/bios.h>
28#include <subdev/bios/vmap.h>
29#include <subdev/bios/volt.h>
30
31static int
32nouveau_volt_get(struct nouveau_volt *volt)
33{
34 if (volt->vid_get) {
35 int ret = volt->vid_get(volt), i;
36 if (ret >= 0) {
37 for (i = 0; i < volt->vid_nr; i++) {
38 if (volt->vid[i].vid == ret)
39 return volt->vid[i].uv;
40 }
41 ret = -EINVAL;
42 }
43 return ret;
44 }
45 return -ENODEV;
46}
47
48static int
49nouveau_volt_set(struct nouveau_volt *volt, u32 uv)
50{
51 if (volt->vid_set) {
52 int i, ret = -EINVAL;
53 for (i = 0; i < volt->vid_nr; i++) {
54 if (volt->vid[i].uv == uv) {
55 ret = volt->vid_set(volt, volt->vid[i].vid);
56 nv_debug(volt, "set %duv: %d\n", uv, ret);
57 break;
58 }
59 }
60 return ret;
61 }
62 return -ENODEV;
63}
64
65static int
66nouveau_volt_map(struct nouveau_volt *volt, u8 id)
67{
68 struct nouveau_bios *bios = nouveau_bios(volt);
69 struct nvbios_vmap_entry info;
70 u8 ver, len;
71 u16 vmap;
72
73 vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
74 if (vmap) {
75 if (info.link != 0xff) {
76 int ret = nouveau_volt_map(volt, info.link);
77 if (ret < 0)
78 return ret;
79 info.min += ret;
80 }
81 return info.min;
82 }
83
84 return id ? id * 10000 : -ENODEV;
85}
86
87static int
88nouveau_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
89{
90 int ret = nouveau_volt_map(volt, id);
91 if (ret >= 0) {
92 int prev = nouveau_volt_get(volt);
93 if (!condition || prev < 0 ||
94 (condition < 0 && ret < prev) ||
95 (condition > 0 && ret > prev)) {
96 ret = nouveau_volt_set(volt, ret);
97 } else {
98 ret = 0;
99 }
100 }
101 return ret;
102}
103
104int
105_nouveau_volt_init(struct nouveau_object *object)
106{
107 struct nouveau_volt *volt = (void *)object;
108 int ret;
109
110 ret = nouveau_subdev_init(&volt->base);
111 if (ret)
112 return ret;
113
114 ret = volt->get(volt);
115 if (ret < 0) {
116 if (ret != -ENODEV)
117 nv_debug(volt, "current voltage unknown\n");
118 return 0;
119 }
120
121 nv_info(volt, "GPU voltage: %duv\n", ret);
122 return 0;
123}
124
125void
126_nouveau_volt_dtor(struct nouveau_object *object)
127{
128 struct nouveau_volt *volt = (void *)object;
129 nouveau_subdev_destroy(&volt->base);
130}
131
132int
133nouveau_volt_create_(struct nouveau_object *parent,
134 struct nouveau_object *engine,
135 struct nouveau_oclass *oclass, int length, void **pobject)
136{
137 struct nouveau_bios *bios = nouveau_bios(parent);
138 struct nouveau_volt *volt;
139 struct nvbios_volt_entry ivid;
140 struct nvbios_volt info;
141 u8 ver, hdr, cnt, len;
142 u16 data;
143 int ret, i;
144
145 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "VOLT",
146 "voltage", length, pobject);
147 volt = *pobject;
148 if (ret)
149 return ret;
150
151 volt->get = nouveau_volt_get;
152 volt->set = nouveau_volt_set;
153 volt->set_id = nouveau_volt_set_id;
154
155 data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
156 if (data && info.vidmask && info.base && info.step) {
157 for (i = 0; i < info.vidmask + 1; i++) {
158 if (info.base >= info.min &&
159 info.base <= info.max) {
160 volt->vid[volt->vid_nr].uv = info.base;
161 volt->vid[volt->vid_nr].vid = i;
162 volt->vid_nr++;
163 }
164 info.base += info.step;
165 }
166 volt->vid_mask = info.vidmask;
167 } else
168 if (data && info.vidmask) {
169 for (i = 0; i < cnt; i++) {
170 data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
171 &ivid);
172 if (data) {
173 volt->vid[volt->vid_nr].uv = ivid.voltage;
174 volt->vid[volt->vid_nr].vid = ivid.vid;
175 volt->vid_nr++;
176 }
177 }
178 volt->vid_mask = info.vidmask;
179 }
180
181 if (volt->vid_nr) {
182 for (i = 0; i < volt->vid_nr; i++) {
183 nv_debug(volt, "VID %02x: %duv\n",
184 volt->vid[i].vid, volt->vid[i].uv);
185 }
186
187 /*XXX: this is an assumption.. there probably exists boards
188 * out there with i2c-connected voltage controllers too..
189 */
190 ret = nouveau_voltgpio_init(volt);
191 if (ret == 0) {
192 volt->vid_get = nouveau_voltgpio_get;
193 volt->vid_set = nouveau_voltgpio_set;
194 }
195 }
196
197 return ret;
198}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c
new file mode 100644
index 000000000000..755fa91bcd09
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/volt.h>
26#include <subdev/gpio.h>
27#include <subdev/bios/gpio.h>
28
29static const u8 tags[] = {
30 DCB_GPIO_VID0, DCB_GPIO_VID1, DCB_GPIO_VID2, DCB_GPIO_VID3,
31 DCB_GPIO_VID4, DCB_GPIO_VID5, DCB_GPIO_VID6, DCB_GPIO_VID7,
32};
33
34int
35nouveau_voltgpio_get(struct nouveau_volt *volt)
36{
37 struct nouveau_gpio *gpio = nouveau_gpio(volt);
38 u8 vid = 0;
39 int i;
40
41 for (i = 0; i < ARRAY_SIZE(tags); i++) {
42 if (volt->vid_mask & (1 << i)) {
43 int ret = gpio->get(gpio, 0, tags[i], 0xff);
44 if (ret < 0)
45 return ret;
46 vid |= ret << i;
47 }
48 }
49
50 return vid;
51}
52
53int
54nouveau_voltgpio_set(struct nouveau_volt *volt, u8 vid)
55{
56 struct nouveau_gpio *gpio = nouveau_gpio(volt);
57 int i;
58
59 for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) {
60 if (volt->vid_mask & (1 << i)) {
61 int ret = gpio->set(gpio, 0, tags[i], 0xff, vid & 1);
62 if (ret < 0)
63 return ret;
64 }
65 }
66
67 return 0;
68}
69
70int
71nouveau_voltgpio_init(struct nouveau_volt *volt)
72{
73 struct nouveau_gpio *gpio = nouveau_gpio(volt);
74 struct dcb_gpio_func func;
75 int i;
76
77 /* check we have gpio function info for each vid bit. on some
78 * boards (ie. nvs295) the vid mask has more bits than there
79 * are valid gpio functions... from traces, nvidia appear to
80 * just touch the existing ones, so let's mask off the invalid
81 * bits and continue with life
82 */
83 for (i = 0; i < ARRAY_SIZE(tags); i++) {
84 if (volt->vid_mask & (1 << i)) {
85 int ret = gpio->find(gpio, 0, tags[i], 0xff, &func);
86 if (ret) {
87 if (ret != -ENOENT)
88 return ret;
89 nv_debug(volt, "VID bit %d has no GPIO\n", i);
90 volt->vid_mask &= ~(1 << i);
91 }
92 }
93 }
94
95 return 0;
96}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c
new file mode 100644
index 000000000000..87d5358376a6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/volt.h>
26
27struct nv40_volt_priv {
28 struct nouveau_volt base;
29};
30
31static int
32nv40_volt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
33 struct nouveau_oclass *oclass, void *data, u32 size,
34 struct nouveau_object **pobject)
35{
36 struct nv40_volt_priv *priv;
37 int ret;
38
39 ret = nouveau_volt_create(parent, engine, oclass, &priv);
40 *pobject = nv_object(priv);
41 if (ret)
42 return ret;
43
44 return 0;
45}
46
47struct nouveau_oclass
48nv40_volt_oclass = {
49 .handle = NV_SUBDEV(VOLT, 0x40),
50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = nv40_volt_ctor,
52 .dtor = _nouveau_volt_dtor,
53 .init = _nouveau_volt_init,
54 .fini = _nouveau_volt_fini,
55 },
56};
diff --git a/drivers/gpu/drm/nouveau/dispnv04/Makefile b/drivers/gpu/drm/nouveau/dispnv04/Makefile
index ea3f5b8a0f95..424a489d0f03 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/Makefile
+++ b/drivers/gpu/drm/nouveau/dispnv04/Makefile
@@ -5,6 +5,7 @@ nouveau-y += dispnv04/dac.o
5nouveau-y += dispnv04/dfp.o 5nouveau-y += dispnv04/dfp.o
6nouveau-y += dispnv04/disp.o 6nouveau-y += dispnv04/disp.o
7nouveau-y += dispnv04/hw.o 7nouveau-y += dispnv04/hw.o
8nouveau-y += dispnv04/overlay.o
8nouveau-y += dispnv04/tvmodesnv17.o 9nouveau-y += dispnv04/tvmodesnv17.o
9nouveau-y += dispnv04/tvnv04.o 10nouveau-y += dispnv04/tvnv04.o
10nouveau-y += dispnv04/tvnv17.o 11nouveau-y += dispnv04/tvnv17.o
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 59d1c040b84f..936a71c59080 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -493,7 +493,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
493 if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 || 493 if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
494 dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) { 494 dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
495 if (mode == DRM_MODE_DPMS_ON) { 495 if (mode == DRM_MODE_DPMS_ON) {
496 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31); 496 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
497 nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1); 497 nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
498 } else { 498 } else {
499 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0); 499 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
@@ -625,13 +625,15 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
625 struct nouveau_drm *drm = nouveau_drm(dev); 625 struct nouveau_drm *drm = nouveau_drm(dev);
626 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 626 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
627 struct nouveau_i2c_port *port = i2c->find(i2c, 2); 627 struct nouveau_i2c_port *port = i2c->find(i2c, 2);
628 struct i2c_board_info info[] = { 628 struct nouveau_i2c_board_info info[] = {
629 { 629 {
630 .type = "sil164", 630 {
631 .addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38), 631 .type = "sil164",
632 .platform_data = &(struct sil164_encoder_params) { 632 .addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38),
633 SIL164_INPUT_EDGE_RISING 633 .platform_data = &(struct sil164_encoder_params) {
634 } 634 SIL164_INPUT_EDGE_RISING
635 }
636 }, 0
635 }, 637 },
636 { } 638 { }
637 }; 639 };
@@ -646,7 +648,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
646 return; 648 return;
647 649
648 drm_i2c_encoder_init(dev, to_encoder_slave(encoder), 650 drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
649 &port->adapter, &info[type]); 651 &port->adapter, &info[type].dev);
650} 652}
651 653
652static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = { 654static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4908d3fd0486..b13ff0fc42de 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -140,6 +140,8 @@ nv04_display_create(struct drm_device *dev)
140 func->save(encoder); 140 func->save(encoder);
141 } 141 }
142 142
143 nouveau_overlay_init(dev);
144
143 return 0; 145 return 0;
144} 146}
145 147
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 2cf65e0b517e..56a28db04000 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -123,6 +123,9 @@ int nv04_tv_create(struct drm_connector *, struct dcb_output *);
123/* nv17_tv.c */ 123/* nv17_tv.c */
124int nv17_tv_create(struct drm_connector *, struct dcb_output *); 124int nv17_tv_create(struct drm_connector *, struct dcb_output *);
125 125
126/* overlay.c */
127void nouveau_overlay_init(struct drm_device *dev);
128
126static inline bool 129static inline bool
127nv_two_heads(struct drm_device *dev) 130nv_two_heads(struct drm_device *dev)
128{ 131{
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index f8dee834527f..aca76af115b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -27,6 +27,7 @@
27#include "hw.h" 27#include "hw.h"
28 28
29#include <subdev/bios/pll.h> 29#include <subdev/bios/pll.h>
30#include <subdev/fb.h>
30#include <subdev/clock.h> 31#include <subdev/clock.h>
31#include <subdev/timer.h> 32#include <subdev/timer.h>
32 33
@@ -664,6 +665,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
664 struct nouveau_drm *drm = nouveau_drm(dev); 665 struct nouveau_drm *drm = nouveau_drm(dev);
665 struct nouveau_device *device = nv_device(drm->device); 666 struct nouveau_device *device = nv_device(drm->device);
666 struct nouveau_timer *ptimer = nouveau_timer(device); 667 struct nouveau_timer *ptimer = nouveau_timer(device);
668 struct nouveau_fb *pfb = nouveau_fb(device);
667 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 669 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
668 uint32_t reg900; 670 uint32_t reg900;
669 int i; 671 int i;
@@ -680,10 +682,10 @@ nv_load_state_ext(struct drm_device *dev, int head,
680 nv_wr32(device, NV_PVIDEO_INTR_EN, 0); 682 nv_wr32(device, NV_PVIDEO_INTR_EN, 0);
681 nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0); 683 nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
682 nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0); 684 nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
683 nv_wr32(device, NV_PVIDEO_LIMIT(0), 0); //drm->fb_available_size - 1); 685 nv_wr32(device, NV_PVIDEO_LIMIT(0), pfb->ram->size - 1);
684 nv_wr32(device, NV_PVIDEO_LIMIT(1), 0); //drm->fb_available_size - 1); 686 nv_wr32(device, NV_PVIDEO_LIMIT(1), pfb->ram->size - 1);
685 nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), 0); //drm->fb_available_size - 1); 687 nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), pfb->ram->size - 1);
686 nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), 0); //drm->fb_available_size - 1); 688 nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), pfb->ram->size - 1);
687 nv_wr32(device, NV_PBUS_POWERCTRL_2, 0); 689 nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
688 690
689 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); 691 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
@@ -740,7 +742,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
740 } 742 }
741 /* NV11 and NV20 stop at 0x52. */ 743 /* NV11 and NV20 stop at 0x52. */
742 if (nv_gf4_disp_arch(dev)) { 744 if (nv_gf4_disp_arch(dev)) {
743 if (nv_device(drm->device)->card_type == NV_10) { 745 if (nv_device(drm->device)->card_type < NV_20) {
744 /* Not waiting for vertical retrace before modifying 746 /* Not waiting for vertical retrace before modifying
745 CRE_53/CRE_54 causes lockups. */ 747 CRE_53/CRE_54 causes lockups. */
746 nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); 748 nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
new file mode 100644
index 000000000000..3618ac6b6316
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -0,0 +1,320 @@
1/*
2 * Copyright 2013 Ilia Mirkin
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
21 *
22 * Implementation based on the pre-KMS implementation in xf86-video-nouveau,
23 * written by Arthur Huillet.
24 */
25
26#include <drm/drmP.h>
27#include <drm/drm_crtc.h>
28#include <drm/drm_fourcc.h>
29
30#include "nouveau_drm.h"
31
32#include "nouveau_bo.h"
33#include "nouveau_connector.h"
34#include "nouveau_display.h"
35#include "nvreg.h"
36
37
38struct nouveau_plane {
39 struct drm_plane base;
40 bool flip;
41 struct nouveau_bo *cur;
42
43 struct {
44 struct drm_property *colorkey;
45 struct drm_property *contrast;
46 struct drm_property *brightness;
47 struct drm_property *hue;
48 struct drm_property *saturation;
49 struct drm_property *iturbt_709;
50 } props;
51
52 int colorkey;
53 int contrast;
54 int brightness;
55 int hue;
56 int saturation;
57 int iturbt_709;
58};
59
60static uint32_t formats[] = {
61 DRM_FORMAT_NV12,
62 DRM_FORMAT_UYVY,
63};
64
65/* Sine can be approximated with
66 * http://en.wikipedia.org/wiki/Bhaskara_I's_sine_approximation_formula
67 * sin(x degrees) ~= 4 x (180 - x) / (40500 - x (180 - x) )
68 * Note that this only works for the range [0, 180].
69 * Also note that sin(x) == -sin(x - 180)
70 */
71static inline int
72sin_mul(int degrees, int factor)
73{
74 if (degrees > 180) {
75 degrees -= 180;
76 factor *= -1;
77 }
78 return factor * 4 * degrees * (180 - degrees) /
79 (40500 - degrees * (180 - degrees));
80}
81
82/* cos(x) = sin(x + 90) */
83static inline int
84cos_mul(int degrees, int factor)
85{
86 return sin_mul((degrees + 90) % 360, factor);
87}
88
89static int
90nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
91 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
92 unsigned int crtc_w, unsigned int crtc_h,
93 uint32_t src_x, uint32_t src_y,
94 uint32_t src_w, uint32_t src_h)
95{
96 struct nouveau_device *dev = nouveau_dev(plane->dev);
97 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
98 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
99 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
100 struct nouveau_bo *cur = nv_plane->cur;
101 bool flip = nv_plane->flip;
102 int format = ALIGN(src_w * 4, 0x100);
103 int soff = NV_PCRTC0_SIZE * nv_crtc->index;
104 int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
105 int ret;
106
107 if (format > 0xffff)
108 return -EINVAL;
109
110 ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
111 if (ret)
112 return ret;
113
114 nv_plane->cur = nv_fb->nvbo;
115
116 /* Source parameters given in 16.16 fixed point, ignore fractional. */
117 src_x = src_x >> 16;
118 src_y = src_y >> 16;
119 src_w = src_w >> 16;
120 src_h = src_h >> 16;
121
122 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
123 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
124
125 nv_wr32(dev, NV_PVIDEO_BASE(flip), 0);
126 nv_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
127 nv_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
128 nv_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
129 nv_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
130 nv_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h);
131 nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
132 nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
133
134 if (fb->pixel_format == DRM_FORMAT_NV12) {
135 format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
136 format |= NV_PVIDEO_FORMAT_PLANAR;
137 }
138 if (nv_plane->iturbt_709)
139 format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
140 if (nv_plane->colorkey & (1 << 24))
141 format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
142
143 if (fb->pixel_format == DRM_FORMAT_NV12) {
144 nv_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
145 nv_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
146 nv_fb->nvbo->bo.offset + fb->offsets[1]);
147 }
148 nv_wr32(dev, NV_PVIDEO_FORMAT(flip), format);
149 nv_wr32(dev, NV_PVIDEO_STOP, 0);
150 /* TODO: wait for vblank? */
151 nv_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1);
152 nv_plane->flip = !flip;
153
154 if (cur)
155 nouveau_bo_unpin(cur);
156
157 return 0;
158}
159
160static int
161nv10_disable_plane(struct drm_plane *plane)
162{
163 struct nouveau_device *dev = nouveau_dev(plane->dev);
164 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
165
166 nv_wr32(dev, NV_PVIDEO_STOP, 1);
167 if (nv_plane->cur) {
168 nouveau_bo_unpin(nv_plane->cur);
169 nv_plane->cur = NULL;
170 }
171
172 return 0;
173}
174
175static void
176nv10_destroy_plane(struct drm_plane *plane)
177{
178 nv10_disable_plane(plane);
179 drm_plane_cleanup(plane);
180 kfree(plane);
181}
182
183static void
184nv10_set_params(struct nouveau_plane *plane)
185{
186 struct nouveau_device *dev = nouveau_dev(plane->base.dev);
187 u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
188 u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
189 (cos_mul(plane->hue, plane->saturation) & 0xffff);
190 u32 format = 0;
191
192 nv_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma);
193 nv_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma);
194 nv_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma);
195 nv_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma);
196 nv_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff);
197
198 if (plane->cur) {
199 if (plane->iturbt_709)
200 format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
201 if (plane->colorkey & (1 << 24))
202 format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
203 nv_mask(dev, NV_PVIDEO_FORMAT(plane->flip),
204 NV_PVIDEO_FORMAT_MATRIX_ITURBT709 |
205 NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY,
206 format);
207 }
208}
209
210static int
211nv10_set_property(struct drm_plane *plane,
212 struct drm_property *property,
213 uint64_t value)
214{
215 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
216
217 if (property == nv_plane->props.colorkey)
218 nv_plane->colorkey = value;
219 else if (property == nv_plane->props.contrast)
220 nv_plane->contrast = value;
221 else if (property == nv_plane->props.brightness)
222 nv_plane->brightness = value;
223 else if (property == nv_plane->props.hue)
224 nv_plane->hue = value;
225 else if (property == nv_plane->props.saturation)
226 nv_plane->saturation = value;
227 else if (property == nv_plane->props.iturbt_709)
228 nv_plane->iturbt_709 = value;
229 else
230 return -EINVAL;
231
232 nv10_set_params(nv_plane);
233 return 0;
234}
235
236static const struct drm_plane_funcs nv10_plane_funcs = {
237 .update_plane = nv10_update_plane,
238 .disable_plane = nv10_disable_plane,
239 .set_property = nv10_set_property,
240 .destroy = nv10_destroy_plane,
241};
242
243static void
244nv10_overlay_init(struct drm_device *device)
245{
246 struct nouveau_device *dev = nouveau_dev(device);
247 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
248 int ret;
249
250 if (!plane)
251 return;
252
253 ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */,
254 &nv10_plane_funcs,
255 formats, ARRAY_SIZE(formats), false);
256 if (ret)
257 goto err;
258
259 /* Set up the plane properties */
260 plane->props.colorkey = drm_property_create_range(
261 device, 0, "colorkey", 0, 0x01ffffff);
262 plane->props.contrast = drm_property_create_range(
263 device, 0, "contrast", 0, 8192 - 1);
264 plane->props.brightness = drm_property_create_range(
265 device, 0, "brightness", 0, 1024);
266 plane->props.hue = drm_property_create_range(
267 device, 0, "hue", 0, 359);
268 plane->props.saturation = drm_property_create_range(
269 device, 0, "saturation", 0, 8192 - 1);
270 plane->props.iturbt_709 = drm_property_create_range(
271 device, 0, "iturbt_709", 0, 1);
272 if (!plane->props.colorkey ||
273 !plane->props.contrast ||
274 !plane->props.brightness ||
275 !plane->props.hue ||
276 !plane->props.saturation ||
277 !plane->props.iturbt_709)
278 goto cleanup;
279
280 plane->colorkey = 0;
281 drm_object_attach_property(&plane->base.base,
282 plane->props.colorkey, plane->colorkey);
283
284 plane->contrast = 0x1000;
285 drm_object_attach_property(&plane->base.base,
286 plane->props.contrast, plane->contrast);
287
288 plane->brightness = 512;
289 drm_object_attach_property(&plane->base.base,
290 plane->props.brightness, plane->brightness);
291
292 plane->hue = 0;
293 drm_object_attach_property(&plane->base.base,
294 plane->props.hue, plane->hue);
295
296 plane->saturation = 0x1000;
297 drm_object_attach_property(&plane->base.base,
298 plane->props.saturation, plane->saturation);
299
300 plane->iturbt_709 = 0;
301 drm_object_attach_property(&plane->base.base,
302 plane->props.iturbt_709, plane->iturbt_709);
303
304 nv10_set_params(plane);
305 nv_wr32(dev, NV_PVIDEO_STOP, 1);
306 return;
307cleanup:
308 drm_plane_cleanup(&plane->base);
309err:
310 kfree(plane);
311 nv_error(dev, "Failed to create plane\n");
312}
313
314void
315nouveau_overlay_init(struct drm_device *device)
316{
317 struct nouveau_device *dev = nouveau_dev(device);
318 if (dev->chipset >= 0x10 && dev->chipset <= 0x40)
319 nv10_overlay_init(device);
320}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index bf13db4e8631..cc4b208ce546 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -37,15 +37,18 @@
37 37
38#include <subdev/i2c.h> 38#include <subdev/i2c.h>
39 39
40static struct i2c_board_info nv04_tv_encoder_info[] = { 40static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = {
41 { 41 {
42 I2C_BOARD_INFO("ch7006", 0x75), 42 {
43 .platform_data = &(struct ch7006_encoder_params) { 43 I2C_BOARD_INFO("ch7006", 0x75),
44 CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER, 44 .platform_data = &(struct ch7006_encoder_params) {
45 0, 0, 0, 45 CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
46 CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED, 46 0, 0, 0,
47 CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC 47 CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
48 } 48 CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
49 }
50 },
51 0
49 }, 52 },
50 { } 53 { }
51}; 54};
@@ -229,7 +232,8 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
229 232
230 /* Run the slave-specific initialization */ 233 /* Run the slave-specific initialization */
231 ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), 234 ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
232 &port->adapter, &nv04_tv_encoder_info[type]); 235 &port->adapter,
236 &nv04_tv_encoder_info[type].dev);
233 if (ret < 0) 237 if (ret < 0)
234 goto fail_cleanup; 238 goto fail_cleanup;
235 239
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 72055a35f845..3621e7f23477 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -87,6 +87,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm)
87 case NV_04: 87 case NV_04:
88 return 0x006e; 88 return 0x006e;
89 case NV_10: 89 case NV_10:
90 case NV_11:
90 case NV_20: 91 case NV_20:
91 case NV_30: 92 case NV_30:
92 case NV_40: 93 case NV_40:
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dd7d2e182719..f9a2df29a593 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -317,6 +317,16 @@ static bool nouveau_dsm_detect(void)
317 has_optimus = 1; 317 has_optimus = 1;
318 } 318 }
319 319
320 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pdev)) != NULL) {
321 vga_count++;
322
323 retval = nouveau_dsm_pci_probe(pdev);
324 if (retval & NOUVEAU_DSM_HAS_MUX)
325 has_dsm |= 1;
326 if (retval & NOUVEAU_DSM_HAS_OPT)
327 has_optimus = 1;
328 }
329
320 /* find the optimus DSM or the old v1 DSM */ 330 /* find the optimus DSM or the old v1 DSM */
321 if (has_optimus == 1) { 331 if (has_optimus == 1) {
322 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, 332 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index 6e7a55f93a85..2953c4e91e1a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -11,10 +11,28 @@ MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
11static int nouveau_agpmode = -1; 11static int nouveau_agpmode = -1;
12module_param_named(agpmode, nouveau_agpmode, int, 0400); 12module_param_named(agpmode, nouveau_agpmode, int, 0400);
13 13
14struct nouveau_agpmode_quirk {
15 u16 hostbridge_vendor;
16 u16 hostbridge_device;
17 u16 chip_vendor;
18 u16 chip_device;
19 int mode;
20};
21
22static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = {
23 /* VIA Apollo PRO133x / GeForce FX 5600 Ultra, max agpmode 2, fdo #20341 */
24 { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
25
26 {},
27};
28
14static unsigned long 29static unsigned long
15get_agp_mode(struct nouveau_drm *drm, unsigned long mode) 30get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
16{ 31{
17 struct nouveau_device *device = nv_device(drm->device); 32 struct nouveau_device *device = nv_device(drm->device);
33 struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
34 int agpmode = nouveau_agpmode;
35 unsigned long mode = info->mode;
18 36
19 /* 37 /*
20 * FW seems to be broken on nv18, it makes the card lock up 38 * FW seems to be broken on nv18, it makes the card lock up
@@ -24,11 +42,27 @@ get_agp_mode(struct nouveau_drm *drm, unsigned long mode)
24 mode &= ~PCI_AGP_COMMAND_FW; 42 mode &= ~PCI_AGP_COMMAND_FW;
25 43
26 /* 44 /*
45 * Go through the quirks list and adjust the agpmode accordingly.
46 */
47 while (agpmode == -1 && quirk->hostbridge_vendor) {
48 if (info->id_vendor == quirk->hostbridge_vendor &&
49 info->id_device == quirk->hostbridge_device &&
50 device->pdev->vendor == quirk->chip_vendor &&
51 device->pdev->device == quirk->chip_device) {
52 agpmode = quirk->mode;
53 nv_info(device, "Forcing agp mode to %dX. Use agpmode to override.\n",
54 agpmode);
55 break;
56 }
57 ++quirk;
58 }
59
60 /*
27 * AGP mode set in the command line. 61 * AGP mode set in the command line.
28 */ 62 */
29 if (nouveau_agpmode > 0) { 63 if (agpmode > 0) {
30 bool agpv3 = mode & 0x8; 64 bool agpv3 = mode & 0x8;
31 int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode; 65 int rate = agpv3 ? agpmode / 4 : agpmode;
32 66
33 mode = (mode & ~0x7) | (rate & 0x7); 67 mode = (mode & ~0x7) | (rate & 0x7);
34 } 68 }
@@ -90,7 +124,7 @@ nouveau_agp_reset(struct nouveau_drm *drm)
90 if (ret) 124 if (ret)
91 return; 125 return;
92 126
93 mode.mode = get_agp_mode(drm, info.mode); 127 mode.mode = get_agp_mode(drm, &info);
94 mode.mode &= ~PCI_AGP_COMMAND_FW; 128 mode.mode &= ~PCI_AGP_COMMAND_FW;
95 129
96 ret = drm_agp_enable(dev, mode); 130 ret = drm_agp_enable(dev, mode);
@@ -139,7 +173,7 @@ nouveau_agp_init(struct nouveau_drm *drm)
139 } 173 }
140 174
141 /* see agp.h for the AGPSTAT_* modes available */ 175 /* see agp.h for the AGPSTAT_* modes available */
142 mode.mode = get_agp_mode(drm, info.mode); 176 mode.mode = get_agp_mode(drm, &info);
143 177
144 ret = drm_agp_enable(dev, mode); 178 ret = drm_agp_enable(dev, mode);
145 if (ret) { 179 if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 2ffad2176b7f..630f6e84fc01 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -82,7 +82,7 @@ nv40_backlight_init(struct drm_connector *connector)
82 memset(&props, 0, sizeof(struct backlight_properties)); 82 memset(&props, 0, sizeof(struct backlight_properties));
83 props.type = BACKLIGHT_RAW; 83 props.type = BACKLIGHT_RAW;
84 props.max_brightness = 31; 84 props.max_brightness = 31;
85 bd = backlight_device_register("nv_backlight", &connector->kdev, drm, 85 bd = backlight_device_register("nv_backlight", connector->kdev, drm,
86 &nv40_bl_ops, &props); 86 &nv40_bl_ops, &props);
87 if (IS_ERR(bd)) 87 if (IS_ERR(bd))
88 return PTR_ERR(bd); 88 return PTR_ERR(bd);
@@ -204,7 +204,7 @@ nv50_backlight_init(struct drm_connector *connector)
204 memset(&props, 0, sizeof(struct backlight_properties)); 204 memset(&props, 0, sizeof(struct backlight_properties));
205 props.type = BACKLIGHT_RAW; 205 props.type = BACKLIGHT_RAW;
206 props.max_brightness = 100; 206 props.max_brightness = 100;
207 bd = backlight_device_register("nv_backlight", &connector->kdev, 207 bd = backlight_device_register("nv_backlight", connector->kdev,
208 nv_encoder, ops, &props); 208 nv_encoder, ops, &props);
209 if (IS_ERR(bd)) 209 if (IS_ERR(bd))
210 return PTR_ERR(bd); 210 return PTR_ERR(bd);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4172854d4365..949ab0cbc4ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -269,7 +269,8 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
269 struct nouveau_fb *pfb = nouveau_fb(drm->device); 269 struct nouveau_fb *pfb = nouveau_fb(drm->device);
270 u32 vram_pages = pfb->ram->size >> PAGE_SHIFT; 270 u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;
271 271
272 if (nv_device(drm->device)->card_type == NV_10 && 272 if ((nv_device(drm->device)->card_type == NV_10 ||
273 nv_device(drm->device)->card_type == NV_11) &&
273 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && 274 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
274 nvbo->bo.mem.num_pages < vram_pages / 4) { 275 nvbo->bo.mem.num_pages < vram_pages / 4) {
275 /* 276 /*
@@ -982,7 +983,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
982 bool no_wait_gpu, struct ttm_mem_reg *new_mem) 983 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
983{ 984{
984 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 985 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
985 struct nouveau_channel *chan = chan = drm->ttm.chan; 986 struct nouveau_channel *chan = drm->ttm.chan;
986 struct nouveau_bo *nvbo = nouveau_bo(bo); 987 struct nouveau_bo *nvbo = nouveau_bo(bo);
987 struct ttm_mem_reg *old_mem = &bo->mem; 988 struct ttm_mem_reg *old_mem = &bo->mem;
988 int ret; 989 int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 2136d0038252..1674882d60d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -100,6 +100,7 @@ static void
100nouveau_connector_destroy(struct drm_connector *connector) 100nouveau_connector_destroy(struct drm_connector *connector)
101{ 101{
102 struct nouveau_connector *nv_connector = nouveau_connector(connector); 102 struct nouveau_connector *nv_connector = nouveau_connector(connector);
103 nouveau_event_ref(NULL, &nv_connector->hpd_func);
103 kfree(nv_connector->edid); 104 kfree(nv_connector->edid);
104 drm_sysfs_connector_remove(connector); 105 drm_sysfs_connector_remove(connector);
105 drm_connector_cleanup(connector); 106 drm_connector_cleanup(connector);
@@ -214,9 +215,10 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
214 } else { 215 } else {
215 connector->doublescan_allowed = true; 216 connector->doublescan_allowed = true;
216 if (nv_device(drm->device)->card_type == NV_20 || 217 if (nv_device(drm->device)->card_type == NV_20 ||
217 (nv_device(drm->device)->card_type == NV_10 && 218 ((nv_device(drm->device)->card_type == NV_10 ||
218 (dev->pdev->device & 0x0ff0) != 0x0100 && 219 nv_device(drm->device)->card_type == NV_11) &&
219 (dev->pdev->device & 0x0ff0) != 0x0150)) 220 (dev->pdev->device & 0x0ff0) != 0x0100 &&
221 (dev->pdev->device & 0x0ff0) != 0x0150))
220 /* HW is broken */ 222 /* HW is broken */
221 connector->interlace_allowed = false; 223 connector->interlace_allowed = false;
222 else 224 else
@@ -932,10 +934,9 @@ nouveau_connector_hotplug_work(struct work_struct *work)
932} 934}
933 935
934static int 936static int
935nouveau_connector_hotplug(struct nouveau_eventh *event, int index) 937nouveau_connector_hotplug(void *data, int index)
936{ 938{
937 struct nouveau_connector *nv_connector = 939 struct nouveau_connector *nv_connector = data;
938 container_of(event, struct nouveau_connector, hpd_func);
939 schedule_work(&nv_connector->hpd_work); 940 schedule_work(&nv_connector->hpd_work);
940 return NVKM_EVENT_KEEP; 941 return NVKM_EVENT_KEEP;
941} 942}
@@ -1007,10 +1008,16 @@ nouveau_connector_create(struct drm_device *dev, int index)
1007 1008
1008 ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)], 1009 ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)],
1009 DCB_GPIO_UNUSED, &nv_connector->hpd); 1010 DCB_GPIO_UNUSED, &nv_connector->hpd);
1010 nv_connector->hpd_func.func = nouveau_connector_hotplug;
1011 if (ret) 1011 if (ret)
1012 nv_connector->hpd.func = DCB_GPIO_UNUSED; 1012 nv_connector->hpd.func = DCB_GPIO_UNUSED;
1013 1013
1014 if (nv_connector->hpd.func != DCB_GPIO_UNUSED) {
1015 nouveau_event_new(gpio->events, nv_connector->hpd.line,
1016 nouveau_connector_hotplug,
1017 nv_connector,
1018 &nv_connector->hpd_func);
1019 }
1020
1014 nv_connector->type = nv_connector->dcb[0]; 1021 nv_connector->type = nv_connector->dcb[0];
1015 if (drm_conntype_from_dcb(nv_connector->type) == 1022 if (drm_conntype_from_dcb(nv_connector->type) ==
1016 DRM_MODE_CONNECTOR_Unknown) { 1023 DRM_MODE_CONNECTOR_Unknown) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 6e399aad491a..264a778f473b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -69,7 +69,7 @@ struct nouveau_connector {
69 69
70 struct dcb_gpio_func hpd; 70 struct dcb_gpio_func hpd;
71 struct work_struct hpd_work; 71 struct work_struct hpd_work;
72 struct nouveau_eventh hpd_func; 72 struct nouveau_eventh *hpd_func;
73 73
74 int dithering_mode; 74 int dithering_mode;
75 int dithering_depth; 75 int dithering_depth;
@@ -107,7 +107,4 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
107struct drm_connector * 107struct drm_connector *
108nouveau_connector_create(struct drm_device *, int index); 108nouveau_connector_create(struct drm_device *, int index);
109 109
110int
111nouveau_connector_bpp(struct drm_connector *);
112
113#endif /* __NOUVEAU_CONNECTOR_H__ */ 110#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index bdd5cf71a24c..44642d9094e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -38,12 +38,85 @@
38 38
39#include "nouveau_fence.h" 39#include "nouveau_fence.h"
40 40
41#include <subdev/bios/gpio.h>
42#include <subdev/gpio.h>
43#include <engine/disp.h> 41#include <engine/disp.h>
44 42
45#include <core/class.h> 43#include <core/class.h>
46 44
45static int
46nouveau_display_vblank_handler(void *data, int head)
47{
48 struct nouveau_drm *drm = data;
49 drm_handle_vblank(drm->dev, head);
50 return NVKM_EVENT_KEEP;
51}
52
53int
54nouveau_display_vblank_enable(struct drm_device *dev, int head)
55{
56 struct nouveau_display *disp = nouveau_display(dev);
57 if (disp) {
58 nouveau_event_get(disp->vblank[head]);
59 return 0;
60 }
61 return -EIO;
62}
63
64void
65nouveau_display_vblank_disable(struct drm_device *dev, int head)
66{
67 struct nouveau_display *disp = nouveau_display(dev);
68 if (disp)
69 nouveau_event_put(disp->vblank[head]);
70}
71
72static void
73nouveau_display_vblank_fini(struct drm_device *dev)
74{
75 struct nouveau_display *disp = nouveau_display(dev);
76 int i;
77
78 if (disp->vblank) {
79 for (i = 0; i < dev->mode_config.num_crtc; i++)
80 nouveau_event_ref(NULL, &disp->vblank[i]);
81 kfree(disp->vblank);
82 disp->vblank = NULL;
83 }
84
85 drm_vblank_cleanup(dev);
86}
87
88static int
89nouveau_display_vblank_init(struct drm_device *dev)
90{
91 struct nouveau_display *disp = nouveau_display(dev);
92 struct nouveau_drm *drm = nouveau_drm(dev);
93 struct nouveau_disp *pdisp = nouveau_disp(drm->device);
94 int ret, i;
95
96 disp->vblank = kzalloc(dev->mode_config.num_crtc *
97 sizeof(*disp->vblank), GFP_KERNEL);
98 if (!disp->vblank)
99 return -ENOMEM;
100
101 for (i = 0; i < dev->mode_config.num_crtc; i++) {
102 ret = nouveau_event_new(pdisp->vblank, i,
103 nouveau_display_vblank_handler,
104 drm, &disp->vblank[i]);
105 if (ret) {
106 nouveau_display_vblank_fini(dev);
107 return ret;
108 }
109 }
110
111 ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
112 if (ret) {
113 nouveau_display_vblank_fini(dev);
114 return ret;
115 }
116
117 return 0;
118}
119
47static void 120static void
48nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) 121nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
49{ 122{
@@ -227,9 +300,7 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
227int 300int
228nouveau_display_init(struct drm_device *dev) 301nouveau_display_init(struct drm_device *dev)
229{ 302{
230 struct nouveau_drm *drm = nouveau_drm(dev);
231 struct nouveau_display *disp = nouveau_display(dev); 303 struct nouveau_display *disp = nouveau_display(dev);
232 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
233 struct drm_connector *connector; 304 struct drm_connector *connector;
234 int ret; 305 int ret;
235 306
@@ -243,10 +314,7 @@ nouveau_display_init(struct drm_device *dev)
243 /* enable hotplug interrupts */ 314 /* enable hotplug interrupts */
244 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 315 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
245 struct nouveau_connector *conn = nouveau_connector(connector); 316 struct nouveau_connector *conn = nouveau_connector(connector);
246 if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) { 317 if (conn->hpd_func) nouveau_event_get(conn->hpd_func);
247 nouveau_event_get(gpio->events, conn->hpd.line,
248 &conn->hpd_func);
249 }
250 } 318 }
251 319
252 return ret; 320 return ret;
@@ -255,18 +323,13 @@ nouveau_display_init(struct drm_device *dev)
255void 323void
256nouveau_display_fini(struct drm_device *dev) 324nouveau_display_fini(struct drm_device *dev)
257{ 325{
258 struct nouveau_drm *drm = nouveau_drm(dev);
259 struct nouveau_display *disp = nouveau_display(dev); 326 struct nouveau_display *disp = nouveau_display(dev);
260 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
261 struct drm_connector *connector; 327 struct drm_connector *connector;
262 328
263 /* disable hotplug interrupts */ 329 /* disable hotplug interrupts */
264 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 330 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
265 struct nouveau_connector *conn = nouveau_connector(connector); 331 struct nouveau_connector *conn = nouveau_connector(connector);
266 if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) { 332 if (conn->hpd_func) nouveau_event_put(conn->hpd_func);
267 nouveau_event_put(gpio->events, conn->hpd.line,
268 &conn->hpd_func);
269 }
270 } 333 }
271 334
272 drm_kms_helper_poll_disable(dev); 335 drm_kms_helper_poll_disable(dev);
@@ -352,7 +415,7 @@ nouveau_display_create(struct drm_device *dev)
352 goto disp_create_err; 415 goto disp_create_err;
353 416
354 if (dev->mode_config.num_crtc) { 417 if (dev->mode_config.num_crtc) {
355 ret = drm_vblank_init(dev, dev->mode_config.num_crtc); 418 ret = nouveau_display_vblank_init(dev);
356 if (ret) 419 if (ret)
357 goto vblank_err; 420 goto vblank_err;
358 } 421 }
@@ -374,7 +437,7 @@ nouveau_display_destroy(struct drm_device *dev)
374 struct nouveau_display *disp = nouveau_display(dev); 437 struct nouveau_display *disp = nouveau_display(dev);
375 438
376 nouveau_backlight_exit(dev); 439 nouveau_backlight_exit(dev);
377 drm_vblank_cleanup(dev); 440 nouveau_display_vblank_fini(dev);
378 441
379 drm_kms_helper_poll_fini(dev); 442 drm_kms_helper_poll_fini(dev);
380 drm_mode_config_cleanup(dev); 443 drm_mode_config_cleanup(dev);
@@ -394,7 +457,7 @@ nouveau_display_suspend(struct drm_device *dev)
394 457
395 nouveau_display_fini(dev); 458 nouveau_display_fini(dev);
396 459
397 NV_SUSPEND(drm, "unpinning framebuffer(s)...\n"); 460 NV_INFO(drm, "unpinning framebuffer(s)...\n");
398 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 461 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
399 struct nouveau_framebuffer *nouveau_fb; 462 struct nouveau_framebuffer *nouveau_fb;
400 463
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 025c66f8e0ed..8bc8bab90e8d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -36,6 +36,8 @@ struct nouveau_display {
36 int (*init)(struct drm_device *); 36 int (*init)(struct drm_device *);
37 void (*fini)(struct drm_device *); 37 void (*fini)(struct drm_device *);
38 38
39 struct nouveau_eventh **vblank;
40
39 struct drm_property *dithering_mode; 41 struct drm_property *dithering_mode;
40 struct drm_property *dithering_depth; 42 struct drm_property *dithering_depth;
41 struct drm_property *underscan_property; 43 struct drm_property *underscan_property;
@@ -59,6 +61,8 @@ void nouveau_display_fini(struct drm_device *dev);
59int nouveau_display_suspend(struct drm_device *dev); 61int nouveau_display_suspend(struct drm_device *dev);
60void nouveau_display_repin(struct drm_device *dev); 62void nouveau_display_repin(struct drm_device *dev);
61void nouveau_display_resume(struct drm_device *dev); 63void nouveau_display_resume(struct drm_device *dev);
64int nouveau_display_vblank_enable(struct drm_device *, int);
65void nouveau_display_vblank_disable(struct drm_device *, int);
62 66
63int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, 67int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
64 struct drm_pending_vblank_event *event, 68 struct drm_pending_vblank_event *event,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 428d818be775..2418b0de589e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -46,7 +46,8 @@
46#include "nouveau_gem.h" 46#include "nouveau_gem.h"
47#include "nouveau_agp.h" 47#include "nouveau_agp.h"
48#include "nouveau_vga.h" 48#include "nouveau_vga.h"
49#include "nouveau_pm.h" 49#include "nouveau_sysfs.h"
50#include "nouveau_hwmon.h"
50#include "nouveau_acpi.h" 51#include "nouveau_acpi.h"
51#include "nouveau_bios.h" 52#include "nouveau_bios.h"
52#include "nouveau_ioctl.h" 53#include "nouveau_ioctl.h"
@@ -78,41 +79,6 @@ module_param_named(runpm, nouveau_runtime_pm, int, 0400);
78 79
79static struct drm_driver driver; 80static struct drm_driver driver;
80 81
81static int
82nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
83{
84 struct nouveau_drm *drm =
85 container_of(event, struct nouveau_drm, vblank[head]);
86 drm_handle_vblank(drm->dev, head);
87 return NVKM_EVENT_KEEP;
88}
89
90static int
91nouveau_drm_vblank_enable(struct drm_device *dev, int head)
92{
93 struct nouveau_drm *drm = nouveau_drm(dev);
94 struct nouveau_disp *pdisp = nouveau_disp(drm->device);
95
96 if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank)))
97 return -EIO;
98 WARN_ON_ONCE(drm->vblank[head].func);
99 drm->vblank[head].func = nouveau_drm_vblank_handler;
100 nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]);
101 return 0;
102}
103
104static void
105nouveau_drm_vblank_disable(struct drm_device *dev, int head)
106{
107 struct nouveau_drm *drm = nouveau_drm(dev);
108 struct nouveau_disp *pdisp = nouveau_disp(drm->device);
109 if (drm->vblank[head].func)
110 nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]);
111 else
112 WARN_ON_ONCE(1);
113 drm->vblank[head].func = NULL;
114}
115
116static u64 82static u64
117nouveau_name(struct pci_dev *pdev) 83nouveau_name(struct pci_dev *pdev)
118{ 84{
@@ -177,7 +143,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
177 143
178 /* initialise synchronisation routines */ 144 /* initialise synchronisation routines */
179 if (device->card_type < NV_10) ret = nv04_fence_create(drm); 145 if (device->card_type < NV_10) ret = nv04_fence_create(drm);
180 else if (device->chipset < 0x17) ret = nv10_fence_create(drm); 146 else if (device->card_type < NV_11 ||
147 device->chipset < 0x17) ret = nv10_fence_create(drm);
181 else if (device->card_type < NV_50) ret = nv17_fence_create(drm); 148 else if (device->card_type < NV_50) ret = nv17_fence_create(drm);
182 else if (device->chipset < 0x84) ret = nv50_fence_create(drm); 149 else if (device->chipset < 0x84) ret = nv50_fence_create(drm);
183 else if (device->card_type < NV_C0) ret = nv84_fence_create(drm); 150 else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
@@ -418,8 +385,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
418 goto fail_dispinit; 385 goto fail_dispinit;
419 } 386 }
420 387
421 nouveau_pm_init(dev); 388 nouveau_sysfs_init(dev);
422 389 nouveau_hwmon_init(dev);
423 nouveau_accel_init(drm); 390 nouveau_accel_init(drm);
424 nouveau_fbcon_init(dev); 391 nouveau_fbcon_init(dev);
425 392
@@ -455,8 +422,8 @@ nouveau_drm_unload(struct drm_device *dev)
455 pm_runtime_get_sync(dev->dev); 422 pm_runtime_get_sync(dev->dev);
456 nouveau_fbcon_fini(dev); 423 nouveau_fbcon_fini(dev);
457 nouveau_accel_fini(drm); 424 nouveau_accel_fini(drm);
458 425 nouveau_hwmon_fini(dev);
459 nouveau_pm_fini(dev); 426 nouveau_sysfs_fini(dev);
460 427
461 if (dev->mode_config.num_crtc) 428 if (dev->mode_config.num_crtc)
462 nouveau_display_fini(dev); 429 nouveau_display_fini(dev);
@@ -496,16 +463,16 @@ nouveau_do_suspend(struct drm_device *dev)
496 int ret; 463 int ret;
497 464
498 if (dev->mode_config.num_crtc) { 465 if (dev->mode_config.num_crtc) {
499 NV_SUSPEND(drm, "suspending display...\n"); 466 NV_INFO(drm, "suspending display...\n");
500 ret = nouveau_display_suspend(dev); 467 ret = nouveau_display_suspend(dev);
501 if (ret) 468 if (ret)
502 return ret; 469 return ret;
503 } 470 }
504 471
505 NV_SUSPEND(drm, "evicting buffers...\n"); 472 NV_INFO(drm, "evicting buffers...\n");
506 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); 473 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
507 474
508 NV_SUSPEND(drm, "waiting for kernel channels to go idle...\n"); 475 NV_INFO(drm, "waiting for kernel channels to go idle...\n");
509 if (drm->cechan) { 476 if (drm->cechan) {
510 ret = nouveau_channel_idle(drm->cechan); 477 ret = nouveau_channel_idle(drm->cechan);
511 if (ret) 478 if (ret)
@@ -518,7 +485,7 @@ nouveau_do_suspend(struct drm_device *dev)
518 return ret; 485 return ret;
519 } 486 }
520 487
521 NV_SUSPEND(drm, "suspending client object trees...\n"); 488 NV_INFO(drm, "suspending client object trees...\n");
522 if (drm->fence && nouveau_fence(drm)->suspend) { 489 if (drm->fence && nouveau_fence(drm)->suspend) {
523 if (!nouveau_fence(drm)->suspend(drm)) 490 if (!nouveau_fence(drm)->suspend(drm))
524 return -ENOMEM; 491 return -ENOMEM;
@@ -530,7 +497,7 @@ nouveau_do_suspend(struct drm_device *dev)
530 goto fail_client; 497 goto fail_client;
531 } 498 }
532 499
533 NV_SUSPEND(drm, "suspending kernel object tree...\n"); 500 NV_INFO(drm, "suspending kernel object tree...\n");
534 ret = nouveau_client_fini(&drm->client.base, true); 501 ret = nouveau_client_fini(&drm->client.base, true);
535 if (ret) 502 if (ret)
536 goto fail_client; 503 goto fail_client;
@@ -544,7 +511,7 @@ fail_client:
544 } 511 }
545 512
546 if (dev->mode_config.num_crtc) { 513 if (dev->mode_config.num_crtc) {
547 NV_SUSPEND(drm, "resuming display...\n"); 514 NV_INFO(drm, "resuming display...\n");
548 nouveau_display_resume(dev); 515 nouveau_display_resume(dev);
549 } 516 }
550 return ret; 517 return ret;
@@ -563,7 +530,6 @@ int nouveau_pmops_suspend(struct device *dev)
563 if (drm_dev->mode_config.num_crtc) 530 if (drm_dev->mode_config.num_crtc)
564 nouveau_fbcon_set_suspend(drm_dev, 1); 531 nouveau_fbcon_set_suspend(drm_dev, 1);
565 532
566 nv_suspend_set_printk_level(NV_DBG_INFO);
567 ret = nouveau_do_suspend(drm_dev); 533 ret = nouveau_do_suspend(drm_dev);
568 if (ret) 534 if (ret)
569 return ret; 535 return ret;
@@ -571,8 +537,6 @@ int nouveau_pmops_suspend(struct device *dev)
571 pci_save_state(pdev); 537 pci_save_state(pdev);
572 pci_disable_device(pdev); 538 pci_disable_device(pdev);
573 pci_set_power_state(pdev, PCI_D3hot); 539 pci_set_power_state(pdev, PCI_D3hot);
574 nv_suspend_set_printk_level(NV_DBG_DEBUG);
575
576 return 0; 540 return 0;
577} 541}
578 542
@@ -582,15 +546,15 @@ nouveau_do_resume(struct drm_device *dev)
582 struct nouveau_drm *drm = nouveau_drm(dev); 546 struct nouveau_drm *drm = nouveau_drm(dev);
583 struct nouveau_cli *cli; 547 struct nouveau_cli *cli;
584 548
585 NV_SUSPEND(drm, "re-enabling device...\n"); 549 NV_INFO(drm, "re-enabling device...\n");
586 550
587 nouveau_agp_reset(drm); 551 nouveau_agp_reset(drm);
588 552
589 NV_SUSPEND(drm, "resuming kernel object tree...\n"); 553 NV_INFO(drm, "resuming kernel object tree...\n");
590 nouveau_client_init(&drm->client.base); 554 nouveau_client_init(&drm->client.base);
591 nouveau_agp_init(drm); 555 nouveau_agp_init(drm);
592 556
593 NV_SUSPEND(drm, "resuming client object trees...\n"); 557 NV_INFO(drm, "resuming client object trees...\n");
594 if (drm->fence && nouveau_fence(drm)->resume) 558 if (drm->fence && nouveau_fence(drm)->resume)
595 nouveau_fence(drm)->resume(drm); 559 nouveau_fence(drm)->resume(drm);
596 560
@@ -599,10 +563,9 @@ nouveau_do_resume(struct drm_device *dev)
599 } 563 }
600 564
601 nouveau_run_vbios_init(dev); 565 nouveau_run_vbios_init(dev);
602 nouveau_pm_resume(dev);
603 566
604 if (dev->mode_config.num_crtc) { 567 if (dev->mode_config.num_crtc) {
605 NV_SUSPEND(drm, "resuming display...\n"); 568 NV_INFO(drm, "resuming display...\n");
606 nouveau_display_repin(dev); 569 nouveau_display_repin(dev);
607 } 570 }
608 571
@@ -626,19 +589,15 @@ int nouveau_pmops_resume(struct device *dev)
626 return ret; 589 return ret;
627 pci_set_master(pdev); 590 pci_set_master(pdev);
628 591
629 nv_suspend_set_printk_level(NV_DBG_INFO);
630 ret = nouveau_do_resume(drm_dev); 592 ret = nouveau_do_resume(drm_dev);
631 if (ret) { 593 if (ret)
632 nv_suspend_set_printk_level(NV_DBG_DEBUG);
633 return ret; 594 return ret;
634 }
635 if (drm_dev->mode_config.num_crtc) 595 if (drm_dev->mode_config.num_crtc)
636 nouveau_fbcon_set_suspend(drm_dev, 0); 596 nouveau_fbcon_set_suspend(drm_dev, 0);
637 597
638 nouveau_fbcon_zfill_all(drm_dev); 598 nouveau_fbcon_zfill_all(drm_dev);
639 if (drm_dev->mode_config.num_crtc) 599 if (drm_dev->mode_config.num_crtc)
640 nouveau_display_resume(drm_dev); 600 nouveau_display_resume(drm_dev);
641 nv_suspend_set_printk_level(NV_DBG_DEBUG);
642 return 0; 601 return 0;
643} 602}
644 603
@@ -648,12 +607,10 @@ static int nouveau_pmops_freeze(struct device *dev)
648 struct drm_device *drm_dev = pci_get_drvdata(pdev); 607 struct drm_device *drm_dev = pci_get_drvdata(pdev);
649 int ret; 608 int ret;
650 609
651 nv_suspend_set_printk_level(NV_DBG_INFO);
652 if (drm_dev->mode_config.num_crtc) 610 if (drm_dev->mode_config.num_crtc)
653 nouveau_fbcon_set_suspend(drm_dev, 1); 611 nouveau_fbcon_set_suspend(drm_dev, 1);
654 612
655 ret = nouveau_do_suspend(drm_dev); 613 ret = nouveau_do_suspend(drm_dev);
656 nv_suspend_set_printk_level(NV_DBG_DEBUG);
657 return ret; 614 return ret;
658} 615}
659 616
@@ -663,18 +620,14 @@ static int nouveau_pmops_thaw(struct device *dev)
663 struct drm_device *drm_dev = pci_get_drvdata(pdev); 620 struct drm_device *drm_dev = pci_get_drvdata(pdev);
664 int ret; 621 int ret;
665 622
666 nv_suspend_set_printk_level(NV_DBG_INFO);
667 ret = nouveau_do_resume(drm_dev); 623 ret = nouveau_do_resume(drm_dev);
668 if (ret) { 624 if (ret)
669 nv_suspend_set_printk_level(NV_DBG_DEBUG);
670 return ret; 625 return ret;
671 }
672 if (drm_dev->mode_config.num_crtc) 626 if (drm_dev->mode_config.num_crtc)
673 nouveau_fbcon_set_suspend(drm_dev, 0); 627 nouveau_fbcon_set_suspend(drm_dev, 0);
674 nouveau_fbcon_zfill_all(drm_dev); 628 nouveau_fbcon_zfill_all(drm_dev);
675 if (drm_dev->mode_config.num_crtc) 629 if (drm_dev->mode_config.num_crtc)
676 nouveau_display_resume(drm_dev); 630 nouveau_display_resume(drm_dev);
677 nv_suspend_set_printk_level(NV_DBG_DEBUG);
678 return 0; 631 return 0;
679} 632}
680 633
@@ -816,8 +769,8 @@ driver = {
816#endif 769#endif
817 770
818 .get_vblank_counter = drm_vblank_count, 771 .get_vblank_counter = drm_vblank_count,
819 .enable_vblank = nouveau_drm_vblank_enable, 772 .enable_vblank = nouveau_display_vblank_enable,
820 .disable_vblank = nouveau_drm_vblank_disable, 773 .disable_vblank = nouveau_display_vblank_disable,
821 774
822 .ioctls = nouveau_ioctls, 775 .ioctls = nouveau_ioctls,
823 .num_ioctls = ARRAY_SIZE(nouveau_ioctls), 776 .num_ioctls = ARRAY_SIZE(nouveau_ioctls),
@@ -878,6 +831,7 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
878 if (nouveau_runtime_pm == 0) 831 if (nouveau_runtime_pm == 0)
879 return -EINVAL; 832 return -EINVAL;
880 833
834 nv_debug_level(SILENT);
881 drm_kms_helper_poll_disable(drm_dev); 835 drm_kms_helper_poll_disable(drm_dev);
882 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); 836 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
883 nouveau_switcheroo_optimus_dsm(); 837 nouveau_switcheroo_optimus_dsm();
@@ -914,6 +868,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
914 nv_mask(device, 0x88488, (1 << 25), (1 << 25)); 868 nv_mask(device, 0x88488, (1 << 25), (1 << 25));
915 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); 869 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
916 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; 870 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
871 nv_debug_level(NORMAL);
917 return ret; 872 return ret;
918} 873}
919 874
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 994fd6ec373b..71ed2dadae61 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -51,10 +51,11 @@ struct nouveau_drm_tile {
51}; 51};
52 52
53enum nouveau_drm_handle { 53enum nouveau_drm_handle {
54 NVDRM_CLIENT = 0xffffffff, 54 NVDRM_CLIENT = 0xffffffff,
55 NVDRM_DEVICE = 0xdddddddd, 55 NVDRM_DEVICE = 0xdddddddd,
56 NVDRM_PUSH = 0xbbbb0000, /* |= client chid */ 56 NVDRM_CONTROL = 0xdddddddc,
57 NVDRM_CHAN = 0xcccc0000, /* |= client chid */ 57 NVDRM_PUSH = 0xbbbb0000, /* |= client chid */
58 NVDRM_CHAN = 0xcccc0000, /* |= client chid */
58}; 59};
59 60
60struct nouveau_cli { 61struct nouveau_cli {
@@ -127,10 +128,10 @@ struct nouveau_drm {
127 struct nvbios vbios; 128 struct nvbios vbios;
128 struct nouveau_display *display; 129 struct nouveau_display *display;
129 struct backlight_device *backlight; 130 struct backlight_device *backlight;
130 struct nouveau_eventh vblank[4];
131 131
132 /* power management */ 132 /* power management */
133 struct nouveau_pm *pm; 133 struct nouveau_hwmon *hwmon;
134 struct nouveau_sysfs *sysfs;
134 135
135 /* display power reference */ 136 /* display power reference */
136 bool have_disp_power_ref; 137 bool have_disp_power_ref;
@@ -154,7 +155,6 @@ nouveau_dev(struct drm_device *dev)
154int nouveau_pmops_suspend(struct device *); 155int nouveau_pmops_suspend(struct device *);
155int nouveau_pmops_resume(struct device *); 156int nouveau_pmops_resume(struct device *);
156 157
157#define NV_SUSPEND(cli, fmt, args...) nv_suspend((cli), fmt, ##args)
158#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args) 158#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
159#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args) 159#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
160#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args) 160#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index c80b519b513a..7903e0ed3c75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -503,34 +503,45 @@ nouveau_fbcon_fini(struct drm_device *dev)
503 drm->fbcon = NULL; 503 drm->fbcon = NULL;
504} 504}
505 505
506void nouveau_fbcon_save_disable_accel(struct drm_device *dev) 506void
507nouveau_fbcon_save_disable_accel(struct drm_device *dev)
507{ 508{
508 struct nouveau_drm *drm = nouveau_drm(dev); 509 struct nouveau_drm *drm = nouveau_drm(dev);
509 510 if (drm->fbcon) {
510 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; 511 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
511 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; 512 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
513 }
512} 514}
513 515
514void nouveau_fbcon_restore_accel(struct drm_device *dev) 516void
517nouveau_fbcon_restore_accel(struct drm_device *dev)
515{ 518{
516 struct nouveau_drm *drm = nouveau_drm(dev); 519 struct nouveau_drm *drm = nouveau_drm(dev);
517 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; 520 if (drm->fbcon) {
521 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
522 }
518} 523}
519 524
520void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) 525void
526nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
521{ 527{
522 struct nouveau_drm *drm = nouveau_drm(dev); 528 struct nouveau_drm *drm = nouveau_drm(dev);
523 console_lock(); 529 if (drm->fbcon) {
524 if (state == 0) 530 console_lock();
525 nouveau_fbcon_save_disable_accel(dev); 531 if (state == 0)
526 fb_set_suspend(drm->fbcon->helper.fbdev, state); 532 nouveau_fbcon_save_disable_accel(dev);
527 if (state == 1) 533 fb_set_suspend(drm->fbcon->helper.fbdev, state);
528 nouveau_fbcon_restore_accel(dev); 534 if (state == 1)
529 console_unlock(); 535 nouveau_fbcon_restore_accel(dev);
536 console_unlock();
537 }
530} 538}
531 539
532void nouveau_fbcon_zfill_all(struct drm_device *dev) 540void
541nouveau_fbcon_zfill_all(struct drm_device *dev)
533{ 542{
534 struct nouveau_drm *drm = nouveau_drm(dev); 543 struct nouveau_drm *drm = nouveau_drm(dev);
535 nouveau_fbcon_zfill(dev, drm->fbcon); 544 if (drm->fbcon) {
545 nouveau_fbcon_zfill(dev, drm->fbcon);
546 }
536} 547}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index be3149932c2d..34b82711e7c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -165,17 +165,11 @@ nouveau_fence_done(struct nouveau_fence *fence)
165 return !fence->channel; 165 return !fence->channel;
166} 166}
167 167
168struct nouveau_fence_uevent {
169 struct nouveau_eventh handler;
170 struct nouveau_fence_priv *priv;
171};
172
173static int 168static int
174nouveau_fence_wait_uevent_handler(struct nouveau_eventh *event, int index) 169nouveau_fence_wait_uevent_handler(void *data, int index)
175{ 170{
176 struct nouveau_fence_uevent *uevent = 171 struct nouveau_fence_priv *priv = data;
177 container_of(event, struct nouveau_fence_uevent, handler); 172 wake_up_all(&priv->waiting);
178 wake_up_all(&uevent->priv->waiting);
179 return NVKM_EVENT_KEEP; 173 return NVKM_EVENT_KEEP;
180} 174}
181 175
@@ -186,13 +180,16 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
186 struct nouveau_channel *chan = fence->channel; 180 struct nouveau_channel *chan = fence->channel;
187 struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device); 181 struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
188 struct nouveau_fence_priv *priv = chan->drm->fence; 182 struct nouveau_fence_priv *priv = chan->drm->fence;
189 struct nouveau_fence_uevent uevent = { 183 struct nouveau_eventh *handler;
190 .handler.func = nouveau_fence_wait_uevent_handler,
191 .priv = priv,
192 };
193 int ret = 0; 184 int ret = 0;
194 185
195 nouveau_event_get(pfifo->uevent, 0, &uevent.handler); 186 ret = nouveau_event_new(pfifo->uevent, 0,
187 nouveau_fence_wait_uevent_handler,
188 priv, &handler);
189 if (ret)
190 return ret;
191
192 nouveau_event_get(handler);
196 193
197 if (fence->timeout) { 194 if (fence->timeout) {
198 unsigned long timeout = fence->timeout - jiffies; 195 unsigned long timeout = fence->timeout - jiffies;
@@ -224,7 +221,7 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
224 } 221 }
225 } 222 }
226 223
227 nouveau_event_put(pfifo->uevent, 0, &uevent.handler); 224 nouveau_event_ref(NULL, &handler);
228 if (unlikely(ret < 0)) 225 if (unlikely(ret < 0))
229 return ret; 226 return ret;
230 227
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 936b442a6ab7..38a4db5bfe21 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -32,369 +32,12 @@
32#include <drm/drmP.h> 32#include <drm/drmP.h>
33 33
34#include "nouveau_drm.h" 34#include "nouveau_drm.h"
35#include "nouveau_pm.h" 35#include "nouveau_hwmon.h"
36 36
37#include <subdev/gpio.h> 37#include <subdev/gpio.h>
38#include <subdev/timer.h> 38#include <subdev/timer.h>
39#include <subdev/therm.h> 39#include <subdev/therm.h>
40 40
41MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
42static char *nouveau_perflvl;
43module_param_named(perflvl, nouveau_perflvl, charp, 0400);
44
45MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
46static int nouveau_perflvl_wr;
47module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
48
49static int
50nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
51 struct nouveau_pm_level *a, struct nouveau_pm_level *b)
52{
53 struct nouveau_drm *drm = nouveau_drm(dev);
54 struct nouveau_pm *pm = nouveau_pm(dev);
55 struct nouveau_therm *therm = nouveau_therm(drm->device);
56 int ret;
57
58 /*XXX: not on all boards, we should control based on temperature
59 * on recent boards.. or maybe on some other factor we don't
60 * know about?
61 */
62 if (therm && therm->fan_set &&
63 a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
64 ret = therm->fan_set(therm, perflvl->fanspeed);
65 if (ret && ret != -ENODEV) {
66 NV_ERROR(drm, "fanspeed set failed: %d\n", ret);
67 }
68 }
69
70 if (pm->voltage.supported && pm->voltage_set) {
71 if (perflvl->volt_min && b->volt_min > a->volt_min) {
72 ret = pm->voltage_set(dev, perflvl->volt_min);
73 if (ret) {
74 NV_ERROR(drm, "voltage set failed: %d\n", ret);
75 return ret;
76 }
77 }
78 }
79
80 return 0;
81}
82
83static int
84nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
85{
86 struct nouveau_pm *pm = nouveau_pm(dev);
87 void *state;
88 int ret;
89
90 if (perflvl == pm->cur)
91 return 0;
92
93 ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl);
94 if (ret)
95 return ret;
96
97 state = pm->clocks_pre(dev, perflvl);
98 if (IS_ERR(state)) {
99 ret = PTR_ERR(state);
100 goto error;
101 }
102 ret = pm->clocks_set(dev, state);
103 if (ret)
104 goto error;
105
106 ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
107 if (ret)
108 return ret;
109
110 pm->cur = perflvl;
111 return 0;
112
113error:
114 /* restore the fan speed and voltage before leaving */
115 nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
116 return ret;
117}
118
119void
120nouveau_pm_trigger(struct drm_device *dev)
121{
122 struct nouveau_drm *drm = nouveau_drm(dev);
123 struct nouveau_timer *ptimer = nouveau_timer(drm->device);
124 struct nouveau_pm *pm = nouveau_pm(dev);
125 struct nouveau_pm_profile *profile = NULL;
126 struct nouveau_pm_level *perflvl = NULL;
127 int ret;
128
129 /* select power profile based on current power source */
130 if (power_supply_is_system_supplied())
131 profile = pm->profile_ac;
132 else
133 profile = pm->profile_dc;
134
135 if (profile != pm->profile) {
136 pm->profile->func->fini(pm->profile);
137 pm->profile = profile;
138 pm->profile->func->init(pm->profile);
139 }
140
141 /* select performance level based on profile */
142 perflvl = profile->func->select(profile);
143
144 /* change perflvl, if necessary */
145 if (perflvl != pm->cur) {
146 u64 time0 = ptimer->read(ptimer);
147
148 NV_INFO(drm, "setting performance level: %d", perflvl->id);
149 ret = nouveau_pm_perflvl_set(dev, perflvl);
150 if (ret)
151 NV_INFO(drm, "> reclocking failed: %d\n\n", ret);
152
153 NV_INFO(drm, "> reclocking took %lluns\n\n",
154 ptimer->read(ptimer) - time0);
155 }
156}
157
158static struct nouveau_pm_profile *
159profile_find(struct drm_device *dev, const char *string)
160{
161 struct nouveau_pm *pm = nouveau_pm(dev);
162 struct nouveau_pm_profile *profile;
163
164 list_for_each_entry(profile, &pm->profiles, head) {
165 if (!strncmp(profile->name, string, sizeof(profile->name)))
166 return profile;
167 }
168
169 return NULL;
170}
171
172static int
173nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
174{
175 struct nouveau_pm *pm = nouveau_pm(dev);
176 struct nouveau_pm_profile *ac = NULL, *dc = NULL;
177 char string[16], *cur = string, *ptr;
178
179 /* safety precaution, for now */
180 if (nouveau_perflvl_wr != 7777)
181 return -EPERM;
182
183 strncpy(string, profile, sizeof(string));
184 string[sizeof(string) - 1] = 0;
185 if ((ptr = strchr(string, '\n')))
186 *ptr = '\0';
187
188 ptr = strsep(&cur, ",");
189 if (ptr)
190 ac = profile_find(dev, ptr);
191
192 ptr = strsep(&cur, ",");
193 if (ptr)
194 dc = profile_find(dev, ptr);
195 else
196 dc = ac;
197
198 if (ac == NULL || dc == NULL)
199 return -EINVAL;
200
201 pm->profile_ac = ac;
202 pm->profile_dc = dc;
203 nouveau_pm_trigger(dev);
204 return 0;
205}
206
207static void
208nouveau_pm_static_dummy(struct nouveau_pm_profile *profile)
209{
210}
211
212static struct nouveau_pm_level *
213nouveau_pm_static_select(struct nouveau_pm_profile *profile)
214{
215 return container_of(profile, struct nouveau_pm_level, profile);
216}
217
218const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = {
219 .destroy = nouveau_pm_static_dummy,
220 .init = nouveau_pm_static_dummy,
221 .fini = nouveau_pm_static_dummy,
222 .select = nouveau_pm_static_select,
223};
224
225static int
226nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
227{
228 struct nouveau_drm *drm = nouveau_drm(dev);
229 struct nouveau_pm *pm = nouveau_pm(dev);
230 struct nouveau_therm *therm = nouveau_therm(drm->device);
231 int ret;
232
233 memset(perflvl, 0, sizeof(*perflvl));
234
235 if (pm->clocks_get) {
236 ret = pm->clocks_get(dev, perflvl);
237 if (ret)
238 return ret;
239 }
240
241 if (pm->voltage.supported && pm->voltage_get) {
242 ret = pm->voltage_get(dev);
243 if (ret > 0) {
244 perflvl->volt_min = ret;
245 perflvl->volt_max = ret;
246 }
247 }
248
249 if (therm && therm->fan_get) {
250 ret = therm->fan_get(therm);
251 if (ret >= 0)
252 perflvl->fanspeed = ret;
253 }
254
255 nouveau_mem_timing_read(dev, &perflvl->timing);
256 return 0;
257}
258
259static void
260nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
261{
262 char c[16], s[16], v[32], f[16], m[16];
263
264 c[0] = '\0';
265 if (perflvl->core)
266 snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000);
267
268 s[0] = '\0';
269 if (perflvl->shader)
270 snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
271
272 m[0] = '\0';
273 if (perflvl->memory)
274 snprintf(m, sizeof(m), " memory %dMHz", perflvl->memory / 1000);
275
276 v[0] = '\0';
277 if (perflvl->volt_min && perflvl->volt_min != perflvl->volt_max) {
278 snprintf(v, sizeof(v), " voltage %dmV-%dmV",
279 perflvl->volt_min / 1000, perflvl->volt_max / 1000);
280 } else
281 if (perflvl->volt_min) {
282 snprintf(v, sizeof(v), " voltage %dmV",
283 perflvl->volt_min / 1000);
284 }
285
286 f[0] = '\0';
287 if (perflvl->fanspeed)
288 snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
289
290 snprintf(ptr, len, "%s%s%s%s%s\n", c, s, m, v, f);
291}
292
293static ssize_t
294nouveau_pm_get_perflvl_info(struct device *d,
295 struct device_attribute *a, char *buf)
296{
297 struct nouveau_pm_level *perflvl =
298 container_of(a, struct nouveau_pm_level, dev_attr);
299 char *ptr = buf;
300 int len = PAGE_SIZE;
301
302 snprintf(ptr, len, "%d:", perflvl->id);
303 ptr += strlen(buf);
304 len -= strlen(buf);
305
306 nouveau_pm_perflvl_info(perflvl, ptr, len);
307 return strlen(buf);
308}
309
310static ssize_t
311nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
312{
313 struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
314 struct nouveau_pm *pm = nouveau_pm(dev);
315 struct nouveau_pm_level cur;
316 int len = PAGE_SIZE, ret;
317 char *ptr = buf;
318
319 snprintf(ptr, len, "profile: %s, %s\nc:",
320 pm->profile_ac->name, pm->profile_dc->name);
321 ptr += strlen(buf);
322 len -= strlen(buf);
323
324 ret = nouveau_pm_perflvl_get(dev, &cur);
325 if (ret == 0)
326 nouveau_pm_perflvl_info(&cur, ptr, len);
327 return strlen(buf);
328}
329
330static ssize_t
331nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a,
332 const char *buf, size_t count)
333{
334 struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
335 int ret;
336
337 ret = nouveau_pm_profile_set(dev, buf);
338 if (ret)
339 return ret;
340 return strlen(buf);
341}
342
343static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR,
344 nouveau_pm_get_perflvl, nouveau_pm_set_perflvl);
345
346static int
347nouveau_sysfs_init(struct drm_device *dev)
348{
349 struct nouveau_drm *drm = nouveau_drm(dev);
350 struct nouveau_pm *pm = nouveau_pm(dev);
351 struct device *d = &dev->pdev->dev;
352 int ret, i;
353
354 ret = device_create_file(d, &dev_attr_performance_level);
355 if (ret)
356 return ret;
357
358 for (i = 0; i < pm->nr_perflvl; i++) {
359 struct nouveau_pm_level *perflvl = &pm->perflvl[i];
360
361 perflvl->dev_attr.attr.name = perflvl->name;
362 perflvl->dev_attr.attr.mode = S_IRUGO;
363 perflvl->dev_attr.show = nouveau_pm_get_perflvl_info;
364 perflvl->dev_attr.store = NULL;
365 sysfs_attr_init(&perflvl->dev_attr.attr);
366
367 ret = device_create_file(d, &perflvl->dev_attr);
368 if (ret) {
369 NV_ERROR(drm, "failed pervlvl %d sysfs: %d\n",
370 perflvl->id, i);
371 perflvl->dev_attr.attr.name = NULL;
372 nouveau_pm_fini(dev);
373 return ret;
374 }
375 }
376
377 return 0;
378}
379
380static void
381nouveau_sysfs_fini(struct drm_device *dev)
382{
383 struct nouveau_pm *pm = nouveau_pm(dev);
384 struct device *d = &dev->pdev->dev;
385 int i;
386
387 device_remove_file(d, &dev_attr_performance_level);
388 for (i = 0; i < pm->nr_perflvl; i++) {
389 struct nouveau_pm_level *pl = &pm->perflvl[i];
390
391 if (!pl->dev_attr.attr.name)
392 break;
393
394 device_remove_file(d, &pl->dev_attr);
395 }
396}
397
398#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 41#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
399static ssize_t 42static ssize_t
400nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) 43nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
@@ -778,9 +421,6 @@ nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
778 int ret = -ENODEV; 421 int ret = -ENODEV;
779 long value; 422 long value;
780 423
781 if (nouveau_perflvl_wr != 7777)
782 return -EPERM;
783
784 if (kstrtol(buf, 10, &value) == -EINVAL) 424 if (kstrtol(buf, 10, &value) == -EINVAL)
785 return -EINVAL; 425 return -EINVAL;
786 426
@@ -919,17 +559,21 @@ static const struct attribute_group hwmon_pwm_fan_attrgroup = {
919}; 559};
920#endif 560#endif
921 561
922static int 562int
923nouveau_hwmon_init(struct drm_device *dev) 563nouveau_hwmon_init(struct drm_device *dev)
924{ 564{
925 struct nouveau_pm *pm = nouveau_pm(dev);
926
927#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 565#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
928 struct nouveau_drm *drm = nouveau_drm(dev); 566 struct nouveau_drm *drm = nouveau_drm(dev);
929 struct nouveau_therm *therm = nouveau_therm(drm->device); 567 struct nouveau_therm *therm = nouveau_therm(drm->device);
568 struct nouveau_hwmon *hwmon;
930 struct device *hwmon_dev; 569 struct device *hwmon_dev;
931 int ret = 0; 570 int ret = 0;
932 571
572 hwmon = drm->hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
573 if (!hwmon)
574 return -ENOMEM;
575 hwmon->dev = dev;
576
933 if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set) 577 if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
934 return -ENODEV; 578 return -ENODEV;
935 579
@@ -976,199 +620,37 @@ nouveau_hwmon_init(struct drm_device *dev)
976 goto error; 620 goto error;
977 } 621 }
978 622
979 pm->hwmon = hwmon_dev; 623 hwmon->hwmon = hwmon_dev;
980 624
981 return 0; 625 return 0;
982 626
983error: 627error:
984 NV_ERROR(drm, "Unable to create some hwmon sysfs files: %d\n", ret); 628 NV_ERROR(drm, "Unable to create some hwmon sysfs files: %d\n", ret);
985 hwmon_device_unregister(hwmon_dev); 629 hwmon_device_unregister(hwmon_dev);
986 pm->hwmon = NULL; 630 hwmon->hwmon = NULL;
987 return ret; 631 return ret;
988#else 632#else
989 pm->hwmon = NULL; 633 hwmon->hwmon = NULL;
990 return 0; 634 return 0;
991#endif 635#endif
992} 636}
993 637
994static void 638void
995nouveau_hwmon_fini(struct drm_device *dev) 639nouveau_hwmon_fini(struct drm_device *dev)
996{ 640{
997#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 641#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
998 struct nouveau_pm *pm = nouveau_pm(dev); 642 struct nouveau_hwmon *hwmon = nouveau_hwmon(dev);
999 643
1000 if (pm->hwmon) { 644 if (hwmon->hwmon) {
1001 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup); 645 sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_default_attrgroup);
1002 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup); 646 sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_temp_attrgroup);
1003 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup); 647 sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
1004 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup); 648 sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
1005 649
1006 hwmon_device_unregister(pm->hwmon); 650 hwmon_device_unregister(hwmon->hwmon);
1007 } 651 }
1008#endif
1009}
1010
1011#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
1012static int
1013nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
1014{
1015 struct nouveau_pm *pm = container_of(nb, struct nouveau_pm, acpi_nb);
1016 struct nouveau_drm *drm = nouveau_drm(pm->dev);
1017 struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
1018
1019 if (strcmp(entry->device_class, "ac_adapter") == 0) {
1020 bool ac = power_supply_is_system_supplied();
1021 652
1022 NV_DEBUG(drm, "power supply changed: %s\n", ac ? "AC" : "DC"); 653 nouveau_drm(dev)->hwmon = NULL;
1023 nouveau_pm_trigger(pm->dev); 654 kfree(hwmon);
1024 }
1025
1026 return NOTIFY_OK;
1027}
1028#endif 655#endif
1029
1030int
1031nouveau_pm_init(struct drm_device *dev)
1032{
1033 struct nouveau_device *device = nouveau_dev(dev);
1034 struct nouveau_drm *drm = nouveau_drm(dev);
1035 struct nouveau_pm *pm;
1036 char info[256];
1037 int ret, i;
1038
1039 pm = drm->pm = kzalloc(sizeof(*pm), GFP_KERNEL);
1040 if (!pm)
1041 return -ENOMEM;
1042
1043 pm->dev = dev;
1044
1045 if (device->card_type < NV_40) {
1046 pm->clocks_get = nv04_pm_clocks_get;
1047 pm->clocks_pre = nv04_pm_clocks_pre;
1048 pm->clocks_set = nv04_pm_clocks_set;
1049 if (nouveau_gpio(drm->device)) {
1050 pm->voltage_get = nouveau_voltage_gpio_get;
1051 pm->voltage_set = nouveau_voltage_gpio_set;
1052 }
1053 } else
1054 if (device->card_type < NV_50) {
1055 pm->clocks_get = nv40_pm_clocks_get;
1056 pm->clocks_pre = nv40_pm_clocks_pre;
1057 pm->clocks_set = nv40_pm_clocks_set;
1058 pm->voltage_get = nouveau_voltage_gpio_get;
1059 pm->voltage_set = nouveau_voltage_gpio_set;
1060 } else
1061 if (device->card_type < NV_C0) {
1062 if (device->chipset < 0xa3 ||
1063 device->chipset == 0xaa ||
1064 device->chipset == 0xac) {
1065 pm->clocks_get = nv50_pm_clocks_get;
1066 pm->clocks_pre = nv50_pm_clocks_pre;
1067 pm->clocks_set = nv50_pm_clocks_set;
1068 } else {
1069 pm->clocks_get = nva3_pm_clocks_get;
1070 pm->clocks_pre = nva3_pm_clocks_pre;
1071 pm->clocks_set = nva3_pm_clocks_set;
1072 }
1073 pm->voltage_get = nouveau_voltage_gpio_get;
1074 pm->voltage_set = nouveau_voltage_gpio_set;
1075 } else
1076 if (device->card_type < NV_E0) {
1077 pm->clocks_get = nvc0_pm_clocks_get;
1078 pm->clocks_pre = nvc0_pm_clocks_pre;
1079 pm->clocks_set = nvc0_pm_clocks_set;
1080 pm->voltage_get = nouveau_voltage_gpio_get;
1081 pm->voltage_set = nouveau_voltage_gpio_set;
1082 }
1083
1084
1085 /* parse aux tables from vbios */
1086 nouveau_volt_init(dev);
1087
1088 INIT_LIST_HEAD(&pm->profiles);
1089
1090 /* determine current ("boot") performance level */
1091 ret = nouveau_pm_perflvl_get(dev, &pm->boot);
1092 if (ret) {
1093 NV_ERROR(drm, "failed to determine boot perflvl\n");
1094 return ret;
1095 }
1096
1097 strncpy(pm->boot.name, "boot", 4);
1098 strncpy(pm->boot.profile.name, "boot", 4);
1099 pm->boot.profile.func = &nouveau_pm_static_profile_func;
1100
1101 list_add(&pm->boot.profile.head, &pm->profiles);
1102
1103 pm->profile_ac = &pm->boot.profile;
1104 pm->profile_dc = &pm->boot.profile;
1105 pm->profile = &pm->boot.profile;
1106 pm->cur = &pm->boot;
1107
1108 /* add performance levels from vbios */
1109 nouveau_perf_init(dev);
1110
1111 /* display available performance levels */
1112 NV_INFO(drm, "%d available performance level(s)\n", pm->nr_perflvl);
1113 for (i = 0; i < pm->nr_perflvl; i++) {
1114 nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
1115 NV_INFO(drm, "%d:%s", pm->perflvl[i].id, info);
1116 }
1117
1118 nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
1119 NV_INFO(drm, "c:%s", info);
1120
1121 /* switch performance levels now if requested */
1122 if (nouveau_perflvl != NULL)
1123 nouveau_pm_profile_set(dev, nouveau_perflvl);
1124
1125 nouveau_sysfs_init(dev);
1126 nouveau_hwmon_init(dev);
1127#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
1128 pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
1129 register_acpi_notifier(&pm->acpi_nb);
1130#endif
1131
1132 return 0;
1133}
1134
1135void
1136nouveau_pm_fini(struct drm_device *dev)
1137{
1138 struct nouveau_pm *pm = nouveau_pm(dev);
1139 struct nouveau_pm_profile *profile, *tmp;
1140
1141 list_for_each_entry_safe(profile, tmp, &pm->profiles, head) {
1142 list_del(&profile->head);
1143 profile->func->destroy(profile);
1144 }
1145
1146 if (pm->cur != &pm->boot)
1147 nouveau_pm_perflvl_set(dev, &pm->boot);
1148
1149 nouveau_perf_fini(dev);
1150 nouveau_volt_fini(dev);
1151
1152#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
1153 unregister_acpi_notifier(&pm->acpi_nb);
1154#endif
1155 nouveau_hwmon_fini(dev);
1156 nouveau_sysfs_fini(dev);
1157
1158 nouveau_drm(dev)->pm = NULL;
1159 kfree(pm);
1160}
1161
1162void
1163nouveau_pm_resume(struct drm_device *dev)
1164{
1165 struct nouveau_pm *pm = nouveau_pm(dev);
1166 struct nouveau_pm_level *perflvl;
1167
1168 if (!pm->cur || pm->cur == &pm->boot)
1169 return;
1170
1171 perflvl = pm->cur;
1172 pm->cur = &pm->boot;
1173 nouveau_pm_perflvl_set(dev, perflvl);
1174} 656}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.h b/drivers/gpu/drm/nouveau/nouveau_hwmon.h
new file mode 100644
index 000000000000..62ccbb39863c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_PM_H__
26#define __NOUVEAU_PM_H__
27
28struct nouveau_hwmon {
29 struct drm_device *dev;
30 struct device *hwmon;
31};
32
33static inline struct nouveau_hwmon *
34nouveau_hwmon(struct drm_device *dev)
35{
36 return nouveau_drm(dev)->hwmon;
37}
38
39/* nouveau_hwmon.c */
40int nouveau_hwmon_init(struct drm_device *dev);
41void nouveau_hwmon_fini(struct drm_device *dev);
42
43#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
deleted file mode 100644
index 697687593a81..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_hwsq.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_HWSQ_H__
26#define __NOUVEAU_HWSQ_H__
27
28struct hwsq_ucode {
29 u8 data[0x200];
30 union {
31 u8 *u08;
32 u16 *u16;
33 u32 *u32;
34 } ptr;
35 u16 len;
36
37 u32 reg;
38 u32 val;
39};
40
41static inline void
42hwsq_init(struct hwsq_ucode *hwsq)
43{
44 hwsq->ptr.u08 = hwsq->data;
45 hwsq->reg = 0xffffffff;
46 hwsq->val = 0xffffffff;
47}
48
49static inline void
50hwsq_fini(struct hwsq_ucode *hwsq)
51{
52 do {
53 *hwsq->ptr.u08++ = 0x7f;
54 hwsq->len = hwsq->ptr.u08 - hwsq->data;
55 } while (hwsq->len & 3);
56 hwsq->ptr.u08 = hwsq->data;
57}
58
59static inline void
60hwsq_usec(struct hwsq_ucode *hwsq, u8 usec)
61{
62 u32 shift = 0;
63 while (usec & ~3) {
64 usec >>= 2;
65 shift++;
66 }
67
68 *hwsq->ptr.u08++ = (shift << 2) | usec;
69}
70
71static inline void
72hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val)
73{
74 flag += 0x80;
75 if (val >= 0)
76 flag += 0x20;
77 if (val >= 1)
78 flag += 0x20;
79 *hwsq->ptr.u08++ = flag;
80}
81
82static inline void
83hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1)
84{
85 *hwsq->ptr.u08++ = 0x5f;
86 *hwsq->ptr.u08++ = v0;
87 *hwsq->ptr.u08++ = v1;
88}
89
90static inline void
91hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val)
92{
93 if (val != hwsq->val) {
94 if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) {
95 *hwsq->ptr.u08++ = 0x42;
96 *hwsq->ptr.u16++ = (val & 0x0000ffff);
97 } else {
98 *hwsq->ptr.u08++ = 0xe2;
99 *hwsq->ptr.u32++ = val;
100 }
101
102 hwsq->val = val;
103 }
104
105 if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) {
106 *hwsq->ptr.u08++ = 0x40;
107 *hwsq->ptr.u16++ = (reg & 0x0000ffff);
108 } else {
109 *hwsq->ptr.u08++ = 0xe0;
110 *hwsq->ptr.u32++ = reg;
111 }
112 hwsq->reg = reg;
113}
114
115#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
deleted file mode 100644
index 4f6a572f2258..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ /dev/null
@@ -1,647 +0,0 @@
1/*
2 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
3 * Copyright 2005 Stephane Marchesin
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Ben Skeggs <bskeggs@redhat.com>
30 * Roy Spliet <r.spliet@student.tudelft.nl>
31 */
32
33#include "nouveau_drm.h"
34#include "nouveau_pm.h"
35
36#include <subdev/fb.h>
37
/*
 * Pack a VBIOS timing-table entry into the NV40 memory timing registers
 * (values land in t->reg[0..2], logged as the 0x100220 "220" block).
 * Always returns 0.  The +2/-1 adjustments around tCWL are of unknown
 * origin, per the in-code XXX note.
 */
38static int
39nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
40		     struct nouveau_pm_tbl_entry *e, u8 len,
41		     struct nouveau_pm_memtiming *boot,
42		     struct nouveau_pm_memtiming *t)
43{
44	struct nouveau_drm *drm = nouveau_drm(dev);
45
46	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
47
48	/* XXX: I don't trust the -1's and +1's... they must come
49	 * from somewhere! */
50	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
51		    1 << 16 |
52		    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
53		    (e->tCL + 2 - (t->tCWL - 1));
54
55	t->reg[2] = 0x20200000 |
56		    ((t->tCWL - 1) << 24 |
57		     e->tRRD << 16 |
58		     e->tRCDWR << 8 |
59		     e->tRCDRD);
60
61	NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x\n", t->id,
62		 t->reg[0], t->reg[1], t->reg[2]);
63	return 0;
64}
65
/*
 * Build the NV50-family memory timing registers (logged as the 220/230/
 * 240 blocks) from a VBIOS timing entry.  Register layout differs
 * between BIT 'P' table versions 1 and 2, hence the two branches.
 * Returns 0, or -EINVAL when the BIT 'P' table is missing.
 */
66static int
67nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
68		     struct nouveau_pm_tbl_entry *e, u8 len,
69		     struct nouveau_pm_memtiming *boot,
70		     struct nouveau_pm_memtiming *t)
71{
72	struct nouveau_device *device = nouveau_dev(dev);
73	struct nouveau_fb *pfb = nouveau_fb(device);
74	struct nouveau_drm *drm = nouveau_drm(dev);
75	struct bit_entry P;
76	uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;
77
78	if (bit_table(dev, 'P', &P))
79		return -EINVAL;
80
	/* Longer table entries carry extra fields; the cases cascade on
	 * purpose so each length picks up all shorter entries' fields. */
81	switch (min(len, (u8) 22)) {
82	case 22:
83		unk21 = e->tUNK_21;
		/* fallthrough */
84	case 21:
85		unk20 = e->tUNK_20;
		/* fallthrough */
86	case 20:
87		if (e->tCWL > 0)
88			t->tCWL = e->tCWL;
		/* fallthrough */
89	case 19:
90		unk18 = e->tUNK_18;
91		break;
92	}
93
94	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
95
96	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
97		    max(unk18, (u8) 1) << 16 |
98		    (e->tWTR + 2 + (t->tCWL - 1)) << 8;
99
100	t->reg[2] = ((t->tCWL - 1) << 24 |
101		     e->tRRD << 16 |
102		     e->tRCDWR << 8 |
103		     e->tRCDRD);
104
105	t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;
106
107	t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);
108
109	t->reg[8] = boot->reg[8] & 0xffffff00;
110
111	if (P.version == 1) {
112		t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));
113
114		t->reg[3] = (0x14 + e->tCL) << 24 |
115			    0x16 << 16 |
116			    (e->tCL - 1) << 8 |
117			    (e->tCL - 1);
118
119		t->reg[4] |= boot->reg[4] & 0xffff0000;
120
121		t->reg[6] = (0x33 - t->tCWL) << 16 |
122			    t->tCWL << 8 |
123			    (0x2e + e->tCL - t->tCWL);
124
125		t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;
126
127		/* XXX: P.version == 1 only has DDR2 and GDDR3? */
128		if (pfb->ram->type == NV_MEM_TYPE_DDR2) {
129			t->reg[5] |= (e->tCL + 3) << 8;
130			t->reg[6] |= (t->tCWL - 2) << 8;
131			t->reg[8] |= (e->tCL - 4);
132		} else {
133			t->reg[5] |= (e->tCL + 2) << 8;
134			t->reg[6] |= t->tCWL << 8;
135			t->reg[8] |= (e->tCL - 2);
136		}
137	} else {
138		t->reg[1] |= (5 + e->tCL - (t->tCWL));
139
140		/* XXX: 0xb? 0x30? */
141		t->reg[3] = (0x30 + e->tCL) << 24 |
142			    (boot->reg[3] & 0x00ff0000)|
143			    (0xb + e->tCL) << 8 |
144			    (e->tCL - 1);
145
146		t->reg[4] |= (unk20 << 24 | unk21 << 16);
147
148		/* XXX: +6? */
149		t->reg[5] |= (t->tCWL + 6) << 8;
150
151		t->reg[6] = (0x5a + e->tCL) << 16 |
152			    (6 - e->tCL + t->tCWL) << 8 |
153			    (0x50 + e->tCL - t->tCWL);
154
155		tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
156		t->reg[7] = (tmp7_3 << 24) |
157			    ((tmp7_3 - 6 + e->tCL) << 16) |
158			    0x202;
159	}
160
161	NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
162		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
163	NV_DEBUG(drm, "         230: %08x %08x %08x %08x\n",
164		 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
165	NV_DEBUG(drm, "         240: %08x\n", t->reg[8]);
166	return 0;
167}
168
/*
 * Build the NVC0/NVD0 (Fermi) memory timing registers (logged as the
 * 290/2a0 blocks) from a VBIOS timing entry.  Fields not covered by the
 * table entry are inherited from the boot-time values via the masked
 * boot->reg[] merges.  Always returns 0.
 */
169static int
170nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
171		     struct nouveau_pm_tbl_entry *e, u8 len,
172		     struct nouveau_pm_memtiming *boot,
173		     struct nouveau_pm_memtiming *t)
174{
175	struct nouveau_drm *drm = nouveau_drm(dev);
176
	/* a zero tCWL in the entry means "keep the boot value" */
177	if (e->tCWL > 0)
178		t->tCWL = e->tCWL;
179
180	t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
181		     e->tRFC << 8 | e->tRC);
182
183	t->reg[1] = (boot->reg[1] & 0xff000000) |
184		    (e->tRCDWR & 0x0f) << 20 |
185		    (e->tRCDRD & 0x0f) << 14 |
186		    (t->tCWL << 7) |
187		    (e->tCL & 0x0f);
188
189	t->reg[2] = (boot->reg[2] & 0xff0000ff) |
190		    e->tWR << 16 | e->tWTR << 8;
191
192	t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
193		    (e->tUNK_21 & 0xf) << 5 |
194		    (e->tUNK_13 & 0x1f);
195
196	t->reg[4] = (boot->reg[4] & 0xfff00fff) |
197		    (e->tRRD&0x1f) << 15;
198
199	NV_DEBUG(drm, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
200		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
201	NV_DEBUG(drm, "         2a0: %08x\n", t->reg[4]);
202	return 0;
203}
204
205/**
206 * MR generation methods
207 */
208
/*
 * Derive DDR2 mode-register (MR0/MR1) values for a timing entry.
 * Table entries shorter than 15 bytes predate the RAM_FT1 field, so ODT
 * settings fall back to the boot configuration.  Returns 0 on success
 * or -ERANGE when tCL/tWR exceed what the MR encoding can express.
 */
209static int
210nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
211		    struct nouveau_pm_tbl_entry *e, u8 len,
212		    struct nouveau_pm_memtiming *boot,
213		    struct nouveau_pm_memtiming *t)
214{
215	struct nouveau_drm *drm = nouveau_drm(dev);
216
217	t->drive_strength = 0;
218	if (len < 15) {
219		t->odt = boot->odt;
220	} else {
221		t->odt = e->RAM_FT1 & 0x07;
222	}
223
224	if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
225		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
226		return -ERANGE;
227	}
228
229	if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
230		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
231		return -ERANGE;
232	}
233
	/* DDR2 ODT is a 2-bit field; anything larger means "disabled" */
234	if (t->odt > 3) {
235		NV_WARN(drm, "(%u) Invalid odt value, assuming disabled: %x",
236			t->id, t->odt);
237		t->odt = 0;
238	}
239
	/* MR0: CAS latency and write recovery; MR1: ODT bits (A2/A6) */
240	t->mr[0] = (boot->mr[0] & 0x100f) |
241		   (e->tCL) << 4 |
242		   (e->tWR - 1) << 9;
243	t->mr[1] = (boot->mr[1] & 0x101fbb) |
244		   (t->odt & 0x1) << 2 |
245		   (t->odt & 0x2) << 5;
246
247	NV_DEBUG(drm, "(%u) MR: %08x", t->id, t->mr[0]);
248	return 0;
249}
250
/* DDR3 MR0 write-recovery field, indexed by tWR in clocks.  Zero slots
 * are unreachable: callers reject tWR outside [4, NV_MEM_WR_DDR3_MAX). */
251static const uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
252	0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
253
/*
 * Derive DDR3 mode-register (MR0/MR1/MR2) values for a timing entry.
 * Entries shorter than 15 bytes lack RAM_FT1, so ODT falls back to the
 * boot configuration.  Returns 0 on success or -ERANGE for tCL/tWR/tCWL
 * values outside what the MR encoding supports.
 */
254static int
255nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
256		    struct nouveau_pm_tbl_entry *e, u8 len,
257		    struct nouveau_pm_memtiming *boot,
258		    struct nouveau_pm_memtiming *t)
259{
260	struct nouveau_drm *drm = nouveau_drm(dev);
	/* DDR3 encodes CAS latency as tCL - 4 (split across MR0 bits) */
261	u8 cl = e->tCL - 4;
262
263	t->drive_strength = 0;
264	if (len < 15) {
265		t->odt = boot->odt;
266	} else {
267		t->odt = e->RAM_FT1 & 0x07;
268	}
269
270	if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
271		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
272		return -ERANGE;
273	}
274
275	if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
276		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
277		return -ERANGE;
278	}
279
280	if (e->tCWL < 5) {
281		NV_WARN(drm, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
282		return -ERANGE;
283	}
284
285	t->mr[0] = (boot->mr[0] & 0x180b) |
286		   /* CAS */
287		   (cl & 0x7) << 4 |
288		   (cl & 0x8) >> 1 |
289		   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
290	t->mr[1] = (boot->mr[1] & 0x101dbb) |
291		   (t->odt & 0x1) << 2 |
292		   (t->odt & 0x2) << 5 |
293		   (t->odt & 0x4) << 7;
	/* MR2: CAS write latency, encoded as tCWL - 5 */
294	t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;
295
296	NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
297	return 0;
298}
299
/* GDDR3 MR0 CAS-latency and MR1 write-recovery encodings, indexed by
 * tCL/tWR in clocks; callers bounds-check against the *_GDDR3_MAX
 * limits before indexing. */
300static const uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
301	0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
302static const uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
303	0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
304
/*
 * Derive GDDR3 mode-register (MR0/MR1) values for a timing entry; MR2
 * is carried over from boot unchanged.  Entries shorter than 15 bytes
 * lack RAM_FT1, so drive strength and ODT fall back to the boot
 * configuration.  Returns 0 on success or -ERANGE for out-of-range
 * tCL/tWR.
 */
305static int
306nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
307		     struct nouveau_pm_tbl_entry *e, u8 len,
308		     struct nouveau_pm_memtiming *boot,
309		     struct nouveau_pm_memtiming *t)
310{
311	struct nouveau_drm *drm = nouveau_drm(dev);
312
313	if (len < 15) {
314		t->drive_strength = boot->drive_strength;
315		t->odt = boot->odt;
316	} else {
317		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
318		t->odt = e->RAM_FT1 & 0x07;
319	}
320
321	if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
322		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
323		return -ERANGE;
324	}
325
326	if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
327		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
328		return -ERANGE;
329	}
330
331	if (t->odt > 3) {
332		NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
333			t->id, t->odt);
334		t->odt = 0;
335	}
336
337	t->mr[0] = (boot->mr[0] & 0xe0b) |
338		   /* CAS */
339		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
340		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
341	t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
342		   (t->odt << 2) |
343		   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
344	t->mr[2] = boot->mr[2];
345
346	NV_DEBUG(drm, "(%u) MR: %08x %08x %08x", t->id,
347		 t->mr[0], t->mr[1], t->mr[2]);
348	return 0;
349}
350
/*
 * Derive GDDR5 mode-register (MR0/MR1) values for a timing entry.
 * GDDR5 encodes latencies directly (tCL - 5, tWR - 4) rather than via
 * lookup tables.  Entries shorter than 15 bytes lack RAM_FT1, so drive
 * strength and ODT fall back to the boot configuration.  Returns 0 on
 * success or -ERANGE for out-of-range tCL/tWR.
 */
351static int
352nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
353		     struct nouveau_pm_tbl_entry *e, u8 len,
354		     struct nouveau_pm_memtiming *boot,
355		     struct nouveau_pm_memtiming *t)
356{
357	struct nouveau_drm *drm = nouveau_drm(dev);
358
359	if (len < 15) {
360		t->drive_strength = boot->drive_strength;
361		t->odt = boot->odt;
362	} else {
363		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
364		t->odt = e->RAM_FT1 & 0x03;
365	}
366
367	if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
368		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
369		return -ERANGE;
370	}
371
372	if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
373		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
374		return -ERANGE;
375	}
376
377	if (t->odt > 3) {
378		NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
379			t->id, t->odt);
380		t->odt = 0;
381	}
382
383	t->mr[0] = (boot->mr[0] & 0x007) |
384		   ((e->tCL - 5) << 3) |
385		   ((e->tWR - 4) << 8);
386	t->mr[1] = (boot->mr[1] & 0x1007f0) |
387		   t->drive_strength |
388		   (t->odt << 2);
389
390	NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
391	return 0;
392}
393
/*
 * Compute the complete memory timing set (timing registers + mode
 * registers + DLL-off bit) for a target memory frequency.  When no
 * usable VBIOS timing entry exists, the boot-time timings are copied
 * and 0 is returned.  Otherwise dispatches to the per-generation
 * timing-register builder, then the per-memtype MR builder.
 */
394int
395nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
396			struct nouveau_pm_memtiming *t)
397{
398	struct nouveau_device *device = nouveau_dev(dev);
399	struct nouveau_fb *pfb = nouveau_fb(device);
400	struct nouveau_pm *pm = nouveau_pm(dev);
401	struct nouveau_pm_memtiming *boot = &pm->boot.timing;
402	struct nouveau_pm_tbl_entry *e;
403	u8 ver, len, *ptr, *ramcfg;
404	int ret;
405
406	ptr = nouveau_perf_timing(dev, freq, &ver, &len);
407	if (!ptr || ptr[0] == 0x00) {
408		*t = *boot;
409		return 0;
410	}
411	e = (struct nouveau_pm_tbl_entry *)ptr;
412
413	t->tCWL = boot->tCWL;
414
415	switch (device->card_type) {
416	case NV_40:
417		ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
418		break;
419	case NV_50:
420		ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
421		break;
422	case NV_C0:
423	case NV_D0:
424		ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
425		break;
426	default:
427		ret = -ENODEV;
428		break;
429	}
430
	/* Multiplying by !ret forces the switch value to 0 when the
	 * timing calc failed, landing in the default case.  NOTE(review):
	 * this relies on no supported memtype having value 0, and it
	 * replaces the original error code with -EINVAL. */
431	switch (pfb->ram->type * !ret) {
432	case NV_MEM_TYPE_GDDR3:
433		ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
434		break;
435	case NV_MEM_TYPE_GDDR5:
436		ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
437		break;
438	case NV_MEM_TYPE_DDR2:
439		ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
440		break;
441	case NV_MEM_TYPE_DDR3:
442		ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
443		break;
444	default:
445		ret = -EINVAL;
446		break;
447	}
448
	/* fold the ramcfg DLL-disable strap into the MR1 DLL bit (bit 6
	 * on GDDR3, bit 0 on DDR2/DDR3) */
449	ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
450	if (ramcfg) {
451		int dll_off;
452
453		if (ver == 0x00)
454			dll_off = !!(ramcfg[3] & 0x04);
455		else
456			dll_off = !!(ramcfg[2] & 0x40);
457
458		switch (pfb->ram->type) {
459		case NV_MEM_TYPE_GDDR3:
460			t->mr[1] &= ~0x00000040;
461			t->mr[1] |= 0x00000040 * dll_off;
462			break;
463		default:
464			t->mr[1] &= ~0x00000001;
465			t->mr[1] |= 0x00000001 * dll_off;
466			break;
467		}
468	}
469
470	return ret;
471}
472
/*
 * Snapshot the current (boot) memory timing state from hardware:
 * timing registers, mode registers, and the ODT/drive-strength bits
 * decoded from MR1.  Register layout depends on the card generation;
 * unsupported generations leave *t mostly untouched (id = -1).
 */
473void
474nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
475{
476	struct nouveau_device *device = nouveau_dev(dev);
477	struct nouveau_fb *pfb = nouveau_fb(device);
478	u32 timing_base, timing_regs, mr_base;
479	int i;
480
481	if (device->card_type >= 0xC0) {
482		timing_base = 0x10f290;
483		mr_base = 0x10f300;
484	} else {
485		timing_base = 0x100220;
486		mr_base = 0x1002c0;
487	}
488
489	t->id = -1;
490
491	switch (device->card_type) {
492	case NV_50:
493		timing_regs = 9;
494		break;
495	case NV_C0:
496	case NV_D0:
497		timing_regs = 5;
498		break;
499	case NV_30:
500	case NV_40:
501		timing_regs = 3;
502		break;
503	default:
504		timing_regs = 0;
505		return;
506	}
507	for(i = 0; i < timing_regs; i++)
508		t->reg[i] = nv_rd32(device, timing_base + (0x04 * i));
509
510	t->tCWL = 0;
511	if (device->card_type < NV_C0) {
512		t->tCWL = ((nv_rd32(device, 0x100228) & 0x0f000000) >> 24) + 1;
513	} else if (device->card_type <= NV_D0) {
514		t->tCWL = ((nv_rd32(device, 0x10f294) & 0x00000f80) >> 7);
515	}
516
517	t->mr[0] = nv_rd32(device, mr_base);
518	t->mr[1] = nv_rd32(device, mr_base + 0x04);
519	t->mr[2] = nv_rd32(device, mr_base + 0x20);
520	t->mr[3] = nv_rd32(device, mr_base + 0x24);
521
522	t->odt = 0;
523	t->drive_strength = 0;
524
525	switch (pfb->ram->type) {
526	case NV_MEM_TYPE_DDR3:
527		t->odt |= (t->mr[1] & 0x200) >> 7;
		/* fallthrough: DDR3 also has the DDR2 ODT bits */
528	case NV_MEM_TYPE_DDR2:
529		t->odt |= (t->mr[1] & 0x04) >> 2 |
530			  (t->mr[1] & 0x40) >> 5;
531		break;
532	case NV_MEM_TYPE_GDDR3:
533	case NV_MEM_TYPE_GDDR5:
534		t->drive_strength = t->mr[1] & 0x03;
535		t->odt = (t->mr[1] & 0x0c) >> 2;
536		break;
537	default:
538		break;
539	}
540}
541
/*
 * Execute a memory reclock via the caller-supplied exec callbacks:
 * DLL-off transition (if needed), enter self-refresh, change the clock,
 * exit self-refresh, reprogram MRs and PFB timing, then DLL reset.
 * The statement ordering follows the DRAM state machine and must not be
 * rearranged.  Returns 0, or -ENODEV for unsupported memory types.
 */
542int
543nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
544		 struct nouveau_pm_level *perflvl)
545{
546	struct nouveau_drm *drm = nouveau_drm(exec->dev);
547	struct nouveau_device *device = nouveau_dev(exec->dev);
548	struct nouveau_fb *pfb = nouveau_fb(device);
549	struct nouveau_pm_memtiming *info = &perflvl->timing;
	/* per-memtype delays in ns; tMRD is a conservative default */
550	u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
551	u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
552	u32 mr1_dlloff;
553
554	switch (pfb->ram->type) {
555	case NV_MEM_TYPE_DDR2:
556		tDLLK = 2000;
557		mr1_dlloff = 0x00000001;
558		break;
559	case NV_MEM_TYPE_DDR3:
560		tDLLK = 12000;
561		tCKSRE = 2000;
562		tXS = 1000;
563		mr1_dlloff = 0x00000001;
564		break;
565	case NV_MEM_TYPE_GDDR3:
566		tDLLK = 40000;
567		mr1_dlloff = 0x00000040;
568		break;
569	default:
570		NV_ERROR(drm, "cannot reclock unsupported memtype\n");
571		return -ENODEV;
572	}
573
574	/* fetch current MRs */
575	switch (pfb->ram->type) {
576	case NV_MEM_TYPE_GDDR3:
577	case NV_MEM_TYPE_DDR3:
578		mr[2] = exec->mrg(exec, 2);
		/* fallthrough: all types read MR0/MR1 */
579	default:
580		mr[1] = exec->mrg(exec, 1);
581		mr[0] = exec->mrg(exec, 0);
582		break;
583	}
584
585	/* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */
586	if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
587		exec->precharge(exec);
588		exec->mrs (exec, 1, mr[1] | mr1_dlloff);
589		exec->wait(exec, tMRD);
590	}
591
592	/* enter self-refresh mode */
593	exec->precharge(exec);
594	exec->refresh(exec);
595	exec->refresh(exec);
596	exec->refresh_auto(exec, false);
597	exec->refresh_self(exec, true);
598	exec->wait(exec, tCKSRE);
599
600	/* modify input clock frequency */
601	exec->clock_set(exec);
602
603	/* exit self-refresh mode */
604	exec->wait(exec, tCKSRX);
605	exec->precharge(exec);
606	exec->refresh_self(exec, false);
607	exec->refresh_auto(exec, true);
	/* NOTE(review): tXS is waited twice here; possibly a doubled
	 * safety margin rather than a typo - confirm before changing. */
608	exec->wait(exec, tXS);
609	exec->wait(exec, tXS);
610
611	/* update MRs */
612	if (mr[2] != info->mr[2]) {
613		exec->mrs (exec, 2, info->mr[2]);
614		exec->wait(exec, tMRD);
615	}
616
617	if (mr[1] != info->mr[1]) {
618		/* need to keep DLL off until later, at least on GDDR3 */
619		exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
620		exec->wait(exec, tMRD);
621	}
622
623	if (mr[0] != info->mr[0]) {
624		exec->mrs (exec, 0, info->mr[0]);
625		exec->wait(exec, tMRD);
626	}
627
628	/* update PFB timing registers */
629	exec->timing_set(exec);
630
631	/* DLL (enable + ) reset */
632	if (!(info->mr[1] & mr1_dlloff)) {
633		if (mr[1] & mr1_dlloff) {
634			exec->mrs (exec, 1, info->mr[1]);
635			exec->wait(exec, tMRD);
636		}
637		exec->mrs (exec, 0, info->mr[0] | 0x00000100);
638		exec->wait(exec, tMRD);
639		exec->mrs (exec, 0, info->mr[0] | 0x00000000);
640		exec->wait(exec, tMRD);
641		exec->wait(exec, tDLLK);
642		if (pfb->ram->type == NV_MEM_TYPE_GDDR3)
643			exec->precharge(exec);
644	}
645
646	return 0;
647}
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
deleted file mode 100644
index 4fe883c54918..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ /dev/null
@@ -1,416 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26
27#include "nouveau_drm.h"
28#include "nouveau_reg.h"
29#include "nouveau_pm.h"
30
/*
 * Locate the VBIOS performance table.  Prefers the BIT 'P' table
 * (versions 1-2); falls back to the BMP pointer at offset 0x94 on old
 * BMP BIOSes (>= 0x25).  On success *ver is set to the table version
 * and the table pointer is returned; NULL when no table is found.
 * Note the version byte lives at perf[0] for BIT but perf[1] for BMP.
 */
31static u8 *
32nouveau_perf_table(struct drm_device *dev, u8 *ver)
33{
34	struct nouveau_drm *drm = nouveau_drm(dev);
35	struct nvbios *bios = &drm->vbios;
36	struct bit_entry P;
37
38	if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) {
39		u8 *perf = ROMPTR(dev, P.data[0]);
40		if (perf) {
41			*ver = perf[0];
42			return perf;
43		}
44	}
45
46	if (bios->type == NVBIOS_BMP) {
47		if (bios->data[bios->offset + 6] >= 0x25) {
48			u8 *perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
49			if (perf) {
50				*ver = perf[1];
51				return perf;
52			}
53		}
54	}
55
56	return NULL;
57}
58
/*
 * Return a pointer to performance-table entry 'idx', decoding the
 * header/count/entry-size layout for each table version range.  Outputs
 * *ver (table version), *hdr (entry header size), *cnt (sub-entry
 * count) and *len (sub-entry size); NULL when the table is missing or
 * idx is out of range.
 */
59static u8 *
60nouveau_perf_entry(struct drm_device *dev, int idx,
61		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
62{
63	u8 *perf = nouveau_perf_table(dev, ver);
64	if (perf) {
65		if (*ver >= 0x12 && *ver < 0x20 && idx < perf[2]) {
66			*hdr = perf[3];
67			*cnt = 0;
68			*len = 0;
69			return perf + perf[0] + idx * perf[3];
70		} else
71		if (*ver >= 0x20 && *ver < 0x40 && idx < perf[2]) {
72			*hdr = perf[3];
73			*cnt = perf[4];
74			*len = perf[5];
75			return perf + perf[1] + idx * (*hdr + (*cnt * *len));
76		} else
77		if (*ver >= 0x40 && *ver < 0x41 && idx < perf[5]) {
78			*hdr = perf[2];
79			*cnt = perf[4];
80			*len = perf[3];
81			return perf + perf[1] + idx * (*hdr + (*cnt * *len));
82		}
83	}
84	return NULL;
85}
86
/*
 * Find the RAM-map (ramcfg group) entry covering memory frequency
 * 'freq' (kHz).  On BIT 'P' version 2 BIOSes the dedicated rammap table
 * is searched by min/max frequency range.  Older BIOSes instead reuse
 * the perf-table entry tail as the ramcfg block (*ver forced to 0x00 to
 * signal the legacy layout to callers).  Returns NULL when no entry
 * matches.
 */
87u8 *
88nouveau_perf_rammap(struct drm_device *dev, u32 freq,
89		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
90{
91	struct nouveau_drm *drm = nouveau_drm(dev);
92	struct bit_entry P;
93	u8 *perf, i = 0;
94
95	if (!bit_table(dev, 'P', &P) && P.version == 2) {
96		u8 *rammap = ROMPTR(dev, P.data[4]);
97		if (rammap) {
98			u8 *ramcfg = rammap + rammap[1];
99
100			*ver = rammap[0];
101			*hdr = rammap[2];
102			*cnt = rammap[4];
103			*len = rammap[3];
104
			/* table stores MHz ranges; freq arrives in kHz */
105			freq /= 1000;
106			for (i = 0; i < rammap[5]; i++) {
107				if (freq >= ROM16(ramcfg[0]) &&
108				    freq <= ROM16(ramcfg[2]))
109					return ramcfg;
110
111				ramcfg += *hdr + (*cnt * *len);
112			}
113		}
114
115		return NULL;
116	}
117
	/* these chipsets clock memory at half the table rate */
118	if (nv_device(drm->device)->chipset == 0x49 ||
119	    nv_device(drm->device)->chipset == 0x4b)
120		freq /= 2;
121
122	while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) {
123		if (*ver >= 0x20 && *ver < 0x25) {
124			if (perf[0] != 0xff && freq <= ROM16(perf[11]) * 1000)
125				break;
126		} else
127		if (*ver >= 0x25 && *ver < 0x40) {
128			if (perf[0] != 0xff && freq <= ROM16(perf[12]) * 1000)
129				break;
130		}
131	}
132
133	if (perf) {
134		u8 *ramcfg = perf + *hdr;
135		*ver = 0x00;
136		*hdr = 0;
137		return ramcfg;
138	}
139
140	return NULL;
141}
142
/*
 * Resolve the ramcfg entry for 'freq': read the board's memory strap
 * from 0x101000, optionally remap it through the BIOS ram-restrict
 * table, then index into the matching rammap group.  Returns NULL when
 * no rammap entry covers the frequency or the strap exceeds the group's
 * sub-entry count.
 */
143u8 *
144nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
145{
146	struct nouveau_device *device = nouveau_dev(dev);
147	struct nouveau_drm *drm = nouveau_drm(dev);
148	struct nvbios *bios = &drm->vbios;
149	u8 strap, hdr, cnt;
150	u8 *rammap;
151
152	strap = (nv_rd32(device, 0x101000) & 0x0000003c) >> 2;
153	if (bios->ram_restrict_tbl_ptr)
154		strap = bios->data[bios->ram_restrict_tbl_ptr + strap];
155
156	rammap = nouveau_perf_rammap(dev, freq, ver, &hdr, &cnt, len);
157	if (rammap && strap < cnt)
158		return rammap + hdr + (strap * *len);
159
160	return NULL;
161}
162
/*
 * Find the memory timing entry for 'freq'.  On BMP BIOSes (perf table
 * version 0x15) the timing data is embedded in the perf entry itself
 * (*ver forced to 0x00 marks the legacy layout).  On BIT BIOSes the
 * dedicated timing table (version 0x10) is indexed via the ramcfg
 * entry's timing index (ramcfg[1]).  Returns NULL if nothing matches.
 */
163u8 *
164nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
165{
166	struct nouveau_drm *drm = nouveau_drm(dev);
167	struct nvbios *bios = &drm->vbios;
168	struct bit_entry P;
169	u8 *perf, *timing = NULL;
170	u8 i = 0, hdr, cnt;
171
172	if (bios->type == NVBIOS_BMP) {
173		while ((perf = nouveau_perf_entry(dev, i++, ver, &hdr, &cnt,
174						  len)) && *ver == 0x15) {
175			if (freq <= ROM32(perf[5]) * 20) {
176				*ver = 0x00;
177				*len = 14;
178				return perf + 41;
179			}
180		}
181		return NULL;
182	}
183
184	if (!bit_table(dev, 'P', &P)) {
185		if (P.version == 1)
186			timing = ROMPTR(dev, P.data[4]);
187		else
188		if (P.version == 2)
189			timing = ROMPTR(dev, P.data[8]);
190	}
191
192	if (timing && timing[0] == 0x10) {
193		u8 *ramcfg = nouveau_perf_ramcfg(dev, freq, ver, len);
194		if (ramcfg && ramcfg[1] < timing[2]) {
195			*ver = timing[0];
196			*len = timing[3];
197			return timing + timing[1] + (ramcfg[1] * timing[3]);
198		}
199	}
200
201	return NULL;
202}
203
/*
 * Build a single performance level from the legacy BMP memclock table
 * (pre-0x25 BMP BIOSes).  Only the memory clock is recoverable from
 * this table format; the result becomes perflvl[0] and nr_perflvl is
 * set to 1.  Silently returns (with a debug message) when the table is
 * absent, too old, or of unknown version.
 */
204static void
205legacy_perf_init(struct drm_device *dev)
206{
207	struct nouveau_device *device = nouveau_dev(dev);
208	struct nouveau_drm *drm = nouveau_drm(dev);
209	struct nvbios *bios = &drm->vbios;
210	struct nouveau_pm *pm = nouveau_pm(dev);
211	char *perf, *entry, *bmp = &bios->data[bios->offset];
212	int headerlen, use_straps;
213
214	if (bmp[5] < 0x5 || bmp[6] < 0x14) {
215		NV_DEBUG(drm, "BMP version too old for perf\n");
216		return;
217	}
218
219	perf = ROMPTR(dev, bmp[0x73]);
220	if (!perf) {
221		NV_DEBUG(drm, "No memclock table pointer found.\n");
222		return;
223	}
224
225	switch (perf[0]) {
226	case 0x12:
227	case 0x14:
228	case 0x18:
229		use_straps = 0;
230		headerlen = 1;
231		break;
232	case 0x01:
233		use_straps = perf[1] & 1;
234		headerlen = (use_straps ? 8 : 2);
235		break;
236	default:
237		NV_WARN(drm, "Unknown memclock table version %x.\n", perf[0]);
238		return;
239	}
240
241	entry = perf + headerlen;
	/* strap bits 2..5 select a 2-byte entry: (strap >> 2) * 2 */
242	if (use_straps)
243		entry += (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
244
	/* fixed string into a fixed-size field; NOTE(review): snprintf
	 * would be safer if the name buffer can be smaller than this
	 * literal - verify sizeof(perflvl->name). */
245	sprintf(pm->perflvl[0].name, "performance_level_0");
246	pm->perflvl[0].memory = ROM16(entry[0]) * 20;
247	pm->nr_perflvl = 1;
248}
249
/*
 * Translate the raw voltage field of a perf-table entry into microvolt
 * bounds.  On entry perflvl->volt_min holds the raw VBIOS value: either
 * a direct multiple of 10mV (voltage table < 0x40) or an index into the
 * volt-map table (>= 0x40).  On failure volt_min is left at 0, which
 * callers treat as "no voltage data".
 */
250static void
251nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
252{
253	struct nouveau_drm *drm = nouveau_drm(dev);
254	struct bit_entry P;
255	u8 *vmap;
256	int id;
257
258	id = perflvl->volt_min;
259	perflvl->volt_min = 0;
260
261	/* boards using voltage table version <0x40 store the voltage
262	 * level directly in the perflvl entry as a multiple of 10mV
263	 */
264	if (drm->pm->voltage.version < 0x40) {
265		perflvl->volt_min = id * 10000;
266		perflvl->volt_max = perflvl->volt_min;
267		return;
268	}
269
270	/* on newer ones, the perflvl stores an index into yet another
271	 * vbios table containing a min/max voltage value for the perflvl
272	 */
273	if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) {
274		NV_DEBUG(drm, "where's our volt map table ptr? %d %d\n",
275			 P.version, P.length);
276		return;
277	}
278
279	vmap = ROMPTR(dev, P.data[32]);
280	if (!vmap) {
281		NV_DEBUG(drm, "volt map table pointer invalid\n");
282		return;
283	}
284
285	if (id < vmap[3]) {
286		vmap += vmap[1] + (vmap[2] * id);
287		perflvl->volt_min = ROM32(vmap[0]);
288		perflvl->volt_max = ROM32(vmap[4]);
289	}
290}
291
292void
293nouveau_perf_init(struct drm_device *dev)
294{
295 struct nouveau_drm *drm = nouveau_drm(dev);
296 struct nouveau_pm *pm = nouveau_pm(dev);
297 struct nvbios *bios = &drm->vbios;
298 u8 *perf, ver, hdr, cnt, len;
299 int ret, vid, i = -1;
300
301 if (bios->type == NVBIOS_BMP && bios->data[bios->offset + 6] < 0x25) {
302 legacy_perf_init(dev);
303 return;
304 }
305
306 perf = nouveau_perf_table(dev, &ver);
307
308 while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) {
309 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
310
311 if (perf[0] == 0xff)
312 continue;
313
314 switch (ver) {
315 case 0x12:
316 case 0x13:
317 case 0x15:
318 perflvl->fanspeed = perf[55];
319 if (hdr > 56)
320 perflvl->volt_min = perf[56];
321 perflvl->core = ROM32(perf[1]) * 10;
322 perflvl->memory = ROM32(perf[5]) * 20;
323 break;
324 case 0x21:
325 case 0x23:
326 case 0x24:
327 perflvl->fanspeed = perf[4];
328 perflvl->volt_min = perf[5];
329 perflvl->shader = ROM16(perf[6]) * 1000;
330 perflvl->core = perflvl->shader;
331 perflvl->core += (signed char)perf[8] * 1000;
332 if (nv_device(drm->device)->chipset == 0x49 ||
333 nv_device(drm->device)->chipset == 0x4b)
334 perflvl->memory = ROM16(perf[11]) * 1000;
335 else
336 perflvl->memory = ROM16(perf[11]) * 2000;
337 break;
338 case 0x25:
339 perflvl->fanspeed = perf[4];
340 perflvl->volt_min = perf[5];
341 perflvl->core = ROM16(perf[6]) * 1000;
342 perflvl->shader = ROM16(perf[10]) * 1000;
343 perflvl->memory = ROM16(perf[12]) * 1000;
344 break;
345 case 0x30:
346 perflvl->memscript = ROM16(perf[2]);
347 case 0x35:
348 perflvl->fanspeed = perf[6];
349 perflvl->volt_min = perf[7];
350 perflvl->core = ROM16(perf[8]) * 1000;
351 perflvl->shader = ROM16(perf[10]) * 1000;
352 perflvl->memory = ROM16(perf[12]) * 1000;
353 perflvl->vdec = ROM16(perf[16]) * 1000;
354 perflvl->dom6 = ROM16(perf[20]) * 1000;
355 break;
356 case 0x40:
357#define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000)
358 perflvl->fanspeed = 0; /*XXX*/
359 perflvl->volt_min = perf[2];
360 if (nv_device(drm->device)->card_type == NV_50) {
361 perflvl->core = subent(0);
362 perflvl->shader = subent(1);
363 perflvl->memory = subent(2);
364 perflvl->vdec = subent(3);
365 perflvl->unka0 = subent(4);
366 } else {
367 perflvl->hub06 = subent(0);
368 perflvl->hub01 = subent(1);
369 perflvl->copy = subent(2);
370 perflvl->shader = subent(3);
371 perflvl->rop = subent(4);
372 perflvl->memory = subent(5);
373 perflvl->vdec = subent(6);
374 perflvl->daemon = subent(10);
375 perflvl->hub07 = subent(11);
376 perflvl->core = perflvl->shader / 2;
377 }
378 break;
379 }
380
381 /* make sure vid is valid */
382 nouveau_perf_voltage(dev, perflvl);
383 if (pm->voltage.supported && perflvl->volt_min) {
384 vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
385 if (vid < 0) {
386 NV_DEBUG(drm, "perflvl %d, bad vid\n", i);
387 continue;
388 }
389 }
390
391 /* get the corresponding memory timings */
392 ret = nouveau_mem_timing_calc(dev, perflvl->memory,
393 &perflvl->timing);
394 if (ret) {
395 NV_DEBUG(drm, "perflvl %d, bad timing: %d\n", i, ret);
396 continue;
397 }
398
399 snprintf(perflvl->name, sizeof(perflvl->name),
400 "performance_level_%d", i);
401 perflvl->id = i;
402
403 snprintf(perflvl->profile.name, sizeof(perflvl->profile.name),
404 "%d", perflvl->id);
405 perflvl->profile.func = &nouveau_pm_static_profile_func;
406 list_add_tail(&perflvl->profile.head, &pm->profiles);
407
408
409 pm->nr_perflvl++;
410 }
411}
412
/* Teardown counterpart to nouveau_perf_init(); currently a no-op kept
 * for interface symmetry (perflvl storage lives inside nouveau_pm). */
413void
414nouveau_perf_fini(struct drm_device *dev)
415{
416}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
deleted file mode 100644
index 73b789c230a9..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ /dev/null
@@ -1,283 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_PM_H__
26#define __NOUVEAU_PM_H__
27
28#include <subdev/bios/pll.h>
29#include <subdev/clock.h>
30
31struct nouveau_pm_voltage_level {
32 u32 voltage; /* microvolts */
33 u8 vid;
34};
35
36struct nouveau_pm_voltage {
37 bool supported;
38 u8 version;
39 u8 vid_mask;
40
41 struct nouveau_pm_voltage_level *level;
42 int nr_level;
43};
44
45/* Exclusive upper limits */
46#define NV_MEM_CL_DDR2_MAX 8
47#define NV_MEM_WR_DDR2_MAX 9
48#define NV_MEM_CL_DDR3_MAX 17
49#define NV_MEM_WR_DDR3_MAX 17
50#define NV_MEM_CL_GDDR3_MAX 16
51#define NV_MEM_WR_GDDR3_MAX 18
52#define NV_MEM_CL_GDDR5_MAX 21
53#define NV_MEM_WR_GDDR5_MAX 20
54
55struct nouveau_pm_memtiming {
56 int id;
57
58 u32 reg[9];
59 u32 mr[4];
60
61 u8 tCWL;
62
63 u8 odt;
64 u8 drive_strength;
65};
66
67struct nouveau_pm_tbl_header {
68 u8 version;
69 u8 header_len;
70 u8 entry_cnt;
71 u8 entry_len;
72};
73
74struct nouveau_pm_tbl_entry {
75 u8 tWR;
76 u8 tWTR;
77 u8 tCL;
78 u8 tRC;
79 u8 empty_4;
80 u8 tRFC; /* Byte 5 */
81 u8 empty_6;
82 u8 tRAS; /* Byte 7 */
83 u8 empty_8;
84 u8 tRP; /* Byte 9 */
85 u8 tRCDRD;
86 u8 tRCDWR;
87 u8 tRRD;
88 u8 tUNK_13;
89 u8 RAM_FT1; /* 14, a bitmask of random RAM features */
90 u8 empty_15;
91 u8 tUNK_16;
92 u8 empty_17;
93 u8 tUNK_18;
94 u8 tCWL;
95 u8 tUNK_20, tUNK_21;
96};
97
98struct nouveau_pm_profile;
99struct nouveau_pm_profile_func {
100 void (*destroy)(struct nouveau_pm_profile *);
101 void (*init)(struct nouveau_pm_profile *);
102 void (*fini)(struct nouveau_pm_profile *);
103 struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
104};
105
106struct nouveau_pm_profile {
107 const struct nouveau_pm_profile_func *func;
108 struct list_head head;
109 char name[8];
110};
111
112#define NOUVEAU_PM_MAX_LEVEL 8
113struct nouveau_pm_level {
114 struct nouveau_pm_profile profile;
115 struct device_attribute dev_attr;
116 char name[32];
117 int id;
118
119 struct nouveau_pm_memtiming timing;
120 u32 memory;
121 u16 memscript;
122
123 u32 core;
124 u32 shader;
125 u32 rop;
126 u32 copy;
127 u32 daemon;
128 u32 vdec;
129 u32 dom6;
130 u32 unka0; /* nva3:nvc0 */
131 u32 hub01; /* nvc0- */
132 u32 hub06; /* nvc0- */
133 u32 hub07; /* nvc0- */
134
135 u32 volt_min; /* microvolts */
136 u32 volt_max;
137 u8 fanspeed;
138};
139
140struct nouveau_pm_temp_sensor_constants {
141 u16 offset_constant;
142 s16 offset_mult;
143 s16 offset_div;
144 s16 slope_mult;
145 s16 slope_div;
146};
147
148struct nouveau_pm_threshold_temp {
149 s16 critical;
150 s16 down_clock;
151};
152
153struct nouveau_pm {
154 struct drm_device *dev;
155
156 struct nouveau_pm_voltage voltage;
157 struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
158 int nr_perflvl;
159 struct nouveau_pm_temp_sensor_constants sensor_constants;
160 struct nouveau_pm_threshold_temp threshold_temp;
161
162 struct nouveau_pm_profile *profile_ac;
163 struct nouveau_pm_profile *profile_dc;
164 struct nouveau_pm_profile *profile;
165 struct list_head profiles;
166
167 struct nouveau_pm_level boot;
168 struct nouveau_pm_level *cur;
169
170 struct device *hwmon;
171 struct notifier_block acpi_nb;
172
173 int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
174 void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
175 int (*clocks_set)(struct drm_device *, void *);
176
177 int (*voltage_get)(struct drm_device *);
178 int (*voltage_set)(struct drm_device *, int voltage);
179};
180
181static inline struct nouveau_pm *
182nouveau_pm(struct drm_device *dev)
183{
184 return nouveau_drm(dev)->pm;
185}
186
187struct nouveau_mem_exec_func {
188 struct drm_device *dev;
189 void (*precharge)(struct nouveau_mem_exec_func *);
190 void (*refresh)(struct nouveau_mem_exec_func *);
191 void (*refresh_auto)(struct nouveau_mem_exec_func *, bool);
192 void (*refresh_self)(struct nouveau_mem_exec_func *, bool);
193 void (*wait)(struct nouveau_mem_exec_func *, u32 nsec);
194 u32 (*mrg)(struct nouveau_mem_exec_func *, int mr);
195 void (*mrs)(struct nouveau_mem_exec_func *, int mr, u32 data);
196 void (*clock_set)(struct nouveau_mem_exec_func *);
197 void (*timing_set)(struct nouveau_mem_exec_func *);
198 void *priv;
199};
200
201/* nouveau_mem.c */
202int nouveau_mem_exec(struct nouveau_mem_exec_func *,
203 struct nouveau_pm_level *);
204
205/* nouveau_pm.c */
206int nouveau_pm_init(struct drm_device *dev);
207void nouveau_pm_fini(struct drm_device *dev);
208void nouveau_pm_resume(struct drm_device *dev);
209extern const struct nouveau_pm_profile_func nouveau_pm_static_profile_func;
210void nouveau_pm_trigger(struct drm_device *dev);
211
212/* nouveau_volt.c */
213void nouveau_volt_init(struct drm_device *);
214void nouveau_volt_fini(struct drm_device *);
215int nouveau_volt_vid_lookup(struct drm_device *, int voltage);
216int nouveau_volt_lvl_lookup(struct drm_device *, int vid);
217int nouveau_voltage_gpio_get(struct drm_device *);
218int nouveau_voltage_gpio_set(struct drm_device *, int voltage);
219
220/* nouveau_perf.c */
221void nouveau_perf_init(struct drm_device *);
222void nouveau_perf_fini(struct drm_device *);
223u8 *nouveau_perf_rammap(struct drm_device *, u32 freq, u8 *ver,
224 u8 *hdr, u8 *cnt, u8 *len);
225u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len);
226u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
227
228/* nouveau_mem.c */
229void nouveau_mem_timing_init(struct drm_device *);
230void nouveau_mem_timing_fini(struct drm_device *);
231
232/* nv04_pm.c */
233int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
234void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
235int nv04_pm_clocks_set(struct drm_device *, void *);
236
237/* nv40_pm.c */
238int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
239void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
240int nv40_pm_clocks_set(struct drm_device *, void *);
241int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
242int nv40_pm_pwm_set(struct drm_device *, int, u32, u32);
243
244/* nv50_pm.c */
245int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
246void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
247int nv50_pm_clocks_set(struct drm_device *, void *);
248int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
249int nv50_pm_pwm_set(struct drm_device *, int, u32, u32);
250
251/* nva3_pm.c */
252int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
253void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
254int nva3_pm_clocks_set(struct drm_device *, void *);
255
256/* nvc0_pm.c */
257int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
258void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
259int nvc0_pm_clocks_set(struct drm_device *, void *);
260
261/* nouveau_mem.c */
262int nouveau_mem_timing_calc(struct drm_device *, u32 freq,
263 struct nouveau_pm_memtiming *);
264void nouveau_mem_timing_read(struct drm_device *,
265 struct nouveau_pm_memtiming *);
266
/* Compute PLL coefficients for 'freq' using the core clock engine.
 * Returns clk->pll_calc()'s result unchanged; N/M/P receive the first
 * stage coefficients.
 *
 * NOTE(review): the *fN output parameter is never written by this
 * function -- callers appear to be expected to initialize it themselves;
 * confirm before relying on it. */
static inline int
nva3_calc_pll(struct drm_device *dev, struct nvbios_pll *pll, u32 freq,
	      int *N, int *fN, int *M, int *P)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_clock *clk = nouveau_clock(device);
	struct nouveau_pll_vals pv;
	int ret;

	ret = clk->pll_calc(clk, pll, freq, &pv);
	*N = pv.N1;
	*M = pv.M1;
	*P = pv.log2P;
	return ret;
}
282
283#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
new file mode 100644
index 000000000000..89201a17ce75
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "nouveau_sysfs.h"
26
27#include <core/object.h>
28#include <core/class.h>
29
/* Map a sysfs struct device back to its drm_device; relies on the DRM
 * PCI layer having stashed it with pci_set_drvdata() at probe time. */
static inline struct drm_device *
drm_device(struct device *d)
{
	return pci_get_drvdata(to_pci_dev(d));
}
35
/* Append a formatted string at buffer cursor 'p' with 'r' bytes left,
 * advancing 'p' and shrinking 'r' by what was actually written.
 * snprintf() always NUL-terminates, so strlen(p) < r holds and, on
 * truncation, later invocations degrade to no-ops instead of
 * overflowing the buffer. */
#define snappendf(p,r,f,a...) do { \
	snprintf(p, r, f, ##a); \
	r -= strlen(p); \
	p += strlen(p); \
} while(0)
41
/* sysfs "pstate" show: one line per performance state in the form
 * "NN: <attr> <min>[-<max>] <unit> ...", plus a final "--:" line for the
 * currently-active state; the active entry is flagged with " *". */
static ssize_t
nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b)
{
	struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
	struct nv_control_pstate_info info;
	size_t cnt = PAGE_SIZE;
	char *buf = b;
	int ret, i;

	ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_INFO, &info, sizeof(info));
	if (ret)
		return ret;

	/* one extra iteration past info.count queries the "current"
	 * pseudo-state */
	for (i = 0; i < info.count + 1; i++) {
		const s32 state = i < info.count ? i :
			NV_CONTROL_PSTATE_ATTR_STATE_CURRENT;
		struct nv_control_pstate_attr attr = {
			.state = state,
			.index = 0,
		};

		ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR,
			      &attr, sizeof(attr));
		if (ret)
			return ret;

		if (i < info.count)
			snappendf(buf, cnt, "%02x:", attr.state);
		else
			snappendf(buf, cnt, "--:");

		/* walk this state's attribute list; NV_CONTROL_PSTATE_ATTR
		 * updates attr.index, and the loop ends when it comes back
		 * as zero */
		attr.index = 0;
		do {
			attr.state = state;
			ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR,
				      &attr, sizeof(attr));
			if (ret)
				return ret;

			snappendf(buf, cnt, " %s %d", attr.name, attr.min);
			if (attr.min != attr.max)
				snappendf(buf, cnt, "-%d", attr.max);
			snappendf(buf, cnt, " %s", attr.unit);
		} while (attr.index);

		/* mark the active state (or the "--" line when no user
		 * override is set) */
		if ((state >= 0 && info.pstate == state) ||
		    (state < 0 && info.ustate < 0))
			snappendf(buf, cnt, " *");
		snappendf(buf, cnt, "\n");
	}

	return strlen(b);
}
95
96static ssize_t
97nouveau_sysfs_pstate_set(struct device *d, struct device_attribute *a,
98 const char *buf, size_t count)
99{
100 struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
101 struct nv_control_pstate_user args;
102 long value, ret;
103 char *tmp;
104
105 if ((tmp = strchr(buf, '\n')))
106 *tmp = '\0';
107
108 if (!strcasecmp(buf, "none"))
109 args.state = NV_CONTROL_PSTATE_USER_STATE_UNKNOWN;
110 else
111 if (!strcasecmp(buf, "auto"))
112 args.state = NV_CONTROL_PSTATE_USER_STATE_PERFMON;
113 else {
114 ret = kstrtol(buf, 16, &value);
115 if (ret)
116 return ret;
117 args.state = value;
118 }
119
120 ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_USER, &args, sizeof(args));
121 if (ret < 0)
122 return ret;
123
124 return count;
125}
126
127static DEVICE_ATTR(pstate, S_IRUGO | S_IWUSR,
128 nouveau_sysfs_pstate_get, nouveau_sysfs_pstate_set);
129
130void
131nouveau_sysfs_fini(struct drm_device *dev)
132{
133 struct nouveau_sysfs *sysfs = nouveau_sysfs(dev);
134 struct nouveau_drm *drm = nouveau_drm(dev);
135
136 if (sysfs->ctrl) {
137 device_remove_file(&dev->pdev->dev, &dev_attr_pstate);
138 nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL);
139 }
140
141 drm->sysfs = NULL;
142 kfree(sysfs);
143}
144
145int
146nouveau_sysfs_init(struct drm_device *dev)
147{
148 struct nouveau_drm *drm = nouveau_drm(dev);
149 struct nouveau_sysfs *sysfs;
150 int ret;
151
152 sysfs = drm->sysfs = kzalloc(sizeof(*sysfs), GFP_KERNEL);
153 if (!sysfs)
154 return -ENOMEM;
155
156 ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL,
157 NV_CONTROL_CLASS, NULL, 0, &sysfs->ctrl);
158 if (ret == 0)
159 device_create_file(&dev->pdev->dev, &dev_attr_pstate);
160
161 return 0;
162}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.h b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
new file mode 100644
index 000000000000..74b47f1e01ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
@@ -0,0 +1,19 @@
1#ifndef __NOUVEAU_SYSFS_H__
2#define __NOUVEAU_SYSFS_H__
3
4#include "nouveau_drm.h"
5
6struct nouveau_sysfs {
7 struct nouveau_object *ctrl;
8};
9
10static inline struct nouveau_sysfs *
11nouveau_sysfs(struct drm_device *dev)
12{
13 return nouveau_drm(dev)->sysfs;
14}
15
16int nouveau_sysfs_init(struct drm_device *);
17void nouveau_sysfs_fini(struct drm_device *);
18
19#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
deleted file mode 100644
index 9976414cbe50..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ /dev/null
@@ -1,250 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26
27#include "nouveau_drm.h"
28#include "nouveau_pm.h"
29
30#include <subdev/bios/gpio.h>
31#include <subdev/gpio.h>
32
33static const enum dcb_gpio_func_name vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
34static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
35
/* Sample each wired-up GPIO VID line to assemble the current VID, then
 * translate it with nouveau_volt_lvl_lookup().  Returns the voltage for
 * that VID, or -ENOENT when the table has no matching entry. */
int
nouveau_voltage_gpio_get(struct drm_device *dev)
{
	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_gpio *gpio = nouveau_gpio(device);
	u8 vid = 0;
	int i;

	for (i = 0; i < nr_vidtag; i++) {
		/* skip VID bits the vbios says are not wired up */
		if (!(volt->vid_mask & (1 << i)))
			continue;

		vid |= gpio->get(gpio, 0, vidtag[i], 0xff) << i;
	}

	return nouveau_volt_lvl_lookup(dev, vid);
}
54
/* Drive the GPIO VID lines to select the level matching 'voltage'.
 * Returns 0 on success, or the negative error from the VID lookup when
 * the requested voltage has no table entry. */
int
nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_gpio *gpio = nouveau_gpio(device);
	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
	int vid, i;

	vid = nouveau_volt_vid_lookup(dev, voltage);
	if (vid < 0)
		return vid;

	for (i = 0; i < nr_vidtag; i++) {
		/* only touch VID bits that are actually wired up */
		if (!(volt->vid_mask & (1 << i)))
			continue;

		gpio->set(gpio, 0, vidtag[i], 0xff, !!(vid & (1 << i)));
	}

	return 0;
}
76
77int
78nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
79{
80 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
81 int i;
82
83 for (i = 0; i < volt->nr_level; i++) {
84 if (volt->level[i].voltage == voltage)
85 return volt->level[i].vid;
86 }
87
88 return -ENOENT;
89}
90
91int
92nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
93{
94 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
95 int i;
96
97 for (i = 0; i < volt->nr_level; i++) {
98 if (volt->level[i].vid == vid)
99 return volt->level[i].voltage;
100 }
101
102 return -ENOENT;
103}
104
105void
106nouveau_volt_init(struct drm_device *dev)
107{
108 struct nouveau_drm *drm = nouveau_drm(dev);
109 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
110 struct nouveau_pm *pm = nouveau_pm(dev);
111 struct nouveau_pm_voltage *voltage = &pm->voltage;
112 struct nvbios *bios = &drm->vbios;
113 struct dcb_gpio_func func;
114 struct bit_entry P;
115 u8 *volt = NULL, *entry;
116 int i, headerlen, recordlen, entries, vidmask, vidshift;
117
118 if (bios->type == NVBIOS_BIT) {
119 if (bit_table(dev, 'P', &P))
120 return;
121
122 if (P.version == 1)
123 volt = ROMPTR(dev, P.data[16]);
124 else
125 if (P.version == 2)
126 volt = ROMPTR(dev, P.data[12]);
127 else {
128 NV_WARN(drm, "unknown volt for BIT P %d\n", P.version);
129 }
130 } else {
131 if (bios->data[bios->offset + 6] < 0x27) {
132 NV_DEBUG(drm, "BMP version too old for voltage\n");
133 return;
134 }
135
136 volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
137 }
138
139 if (!volt) {
140 NV_DEBUG(drm, "voltage table pointer invalid\n");
141 return;
142 }
143
144 switch (volt[0]) {
145 case 0x10:
146 case 0x11:
147 case 0x12:
148 headerlen = 5;
149 recordlen = volt[1];
150 entries = volt[2];
151 vidshift = 0;
152 vidmask = volt[4];
153 break;
154 case 0x20:
155 headerlen = volt[1];
156 recordlen = volt[3];
157 entries = volt[2];
158 vidshift = 0; /* could be vidshift like 0x30? */
159 vidmask = volt[5];
160 break;
161 case 0x30:
162 headerlen = volt[1];
163 recordlen = volt[2];
164 entries = volt[3];
165 vidmask = volt[4];
166 /* no longer certain what volt[5] is, if it's related to
167 * the vid shift then it's definitely not a function of
168 * how many bits are set.
169 *
170 * after looking at a number of nva3+ vbios images, they
171 * all seem likely to have a static shift of 2.. lets
172 * go with that for now until proven otherwise.
173 */
174 vidshift = 2;
175 break;
176 case 0x40:
177 headerlen = volt[1];
178 recordlen = volt[2];
179 entries = volt[3]; /* not a clue what the entries are for.. */
180 vidmask = volt[11]; /* guess.. */
181 vidshift = 0;
182 break;
183 default:
184 NV_WARN(drm, "voltage table 0x%02x unknown\n", volt[0]);
185 return;
186 }
187
188 /* validate vid mask */
189 voltage->vid_mask = vidmask;
190 if (!voltage->vid_mask)
191 return;
192
193 i = 0;
194 while (vidmask) {
195 if (i > nr_vidtag) {
196 NV_DEBUG(drm, "vid bit %d unknown\n", i);
197 return;
198 }
199
200 if (gpio && gpio->find(gpio, 0, vidtag[i], 0xff, &func)) {
201 NV_DEBUG(drm, "vid bit %d has no gpio tag\n", i);
202 return;
203 }
204
205 vidmask >>= 1;
206 i++;
207 }
208
209 /* parse vbios entries into common format */
210 voltage->version = volt[0];
211 if (voltage->version < 0x40) {
212 voltage->nr_level = entries;
213 voltage->level =
214 kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
215 if (!voltage->level)
216 return;
217
218 entry = volt + headerlen;
219 for (i = 0; i < entries; i++, entry += recordlen) {
220 voltage->level[i].voltage = entry[0] * 10000;
221 voltage->level[i].vid = entry[1] >> vidshift;
222 }
223 } else {
224 u32 volt_uv = ROM32(volt[4]);
225 s16 step_uv = ROM16(volt[8]);
226 u8 vid;
227
228 voltage->nr_level = voltage->vid_mask + 1;
229 voltage->level = kcalloc(voltage->nr_level,
230 sizeof(*voltage->level), GFP_KERNEL);
231 if (!voltage->level)
232 return;
233
234 for (vid = 0; vid <= voltage->vid_mask; vid++) {
235 voltage->level[vid].voltage = volt_uv;
236 voltage->level[vid].vid = vid;
237 volt_uv += step_uv;
238 }
239 }
240
241 voltage->supported = true;
242}
243
void
nouveau_volt_fini(struct drm_device *dev)
{
	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;

	/* level table was kcalloc'd by nouveau_volt_init(); kfree(NULL)
	 * is a no-op if init bailed out early */
	kfree(volt->level);
}
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
deleted file mode 100644
index 27afc0ea28b0..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ /dev/null
@@ -1,146 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drm.h"
27#include "nouveau_reg.h"
28#include "dispnv04/hw.h"
29#include "nouveau_pm.h"
30
31#include <subdev/bios/pll.h>
32#include <subdev/clock.h>
33#include <subdev/timer.h>
34
35int
36nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
37{
38 int ret;
39
40 ret = nouveau_hw_get_clock(dev, PLL_CORE);
41 if (ret < 0)
42 return ret;
43 perflvl->core = ret;
44
45 ret = nouveau_hw_get_clock(dev, PLL_MEMORY);
46 if (ret < 0)
47 return ret;
48 perflvl->memory = ret;
49
50 return 0;
51}
52
53struct nv04_pm_clock {
54 struct nvbios_pll pll;
55 struct nouveau_pll_vals calc;
56};
57
58struct nv04_pm_state {
59 struct nv04_pm_clock core;
60 struct nv04_pm_clock memory;
61};
62
/* Parse the vbios limits for PLL 'id' and compute coefficients hitting
 * 'khz'.  pll_calc() signals failure by returning 0, hence the inverted
 * check below. */
static int
calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_bios *bios = nouveau_bios(device);
	struct nouveau_clock *pclk = nouveau_clock(device);
	int ret;

	ret = nvbios_pll_parse(bios, id, &clk->pll);
	if (ret)
		return ret;

	ret = pclk->pll_calc(pclk, &clk->pll, khz, &clk->calc);
	if (!ret)
		return -EINVAL;

	return 0;
}
81
/* Pre-compute PLL settings for the target perflvl without touching
 * hardware.  Returns state for nv04_pm_clocks_set(), or an ERR_PTR. */
void *
nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	struct nv04_pm_state *info;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core);
	if (ret)
		goto error;

	/* memory reclock is optional; when skipped, info->memory stays
	 * zeroed (kzalloc), which nv04_pm_clocks_set() reads as
	 * "don't touch" via memory.pll.reg == 0 */
	if (perflvl->memory) {
		ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory);
		if (ret)
			goto error;
	}

	return info;
error:
	kfree(info);
	return ERR_PTR(ret);
}
107
/* Program one PLL with coefficients previously computed by calc_pll(). */
static void
prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_clock *pclk = nouveau_clock(device);
	u32 reg = clk->pll.reg;

	/* thank the insane nouveau_hw_setpll() interface for this */
	if (device->card_type >= NV_40)
		reg += 4;

	pclk->pll_prog(pclk, reg, &clk->calc);
}
121
/* Apply the state computed by nv04_pm_clocks_pre() and free it.
 * Always returns 0. */
int
nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_timer *ptimer = nouveau_timer(device);
	struct nv04_pm_state *state = pre_state;

	prog_pll(dev, &state->core);

	/* memory.pll.reg == 0 means no memory reclock was requested */
	if (state->memory.pll.reg) {
		prog_pll(dev, &state->memory);
		if (device->card_type < NV_30) {
			if (device->card_type == NV_20)
				nv_mask(device, 0x1002c4, 0, 1 << 20);

			/* Reset the DLLs */
			nv_mask(device, 0x1002c0, 0, 1 << 8);
		}
	}

	/* NOTE(review): presumably re-initializes PTIMER because its rate
	 * depends on the clocks just changed -- confirm against PTIMER */
	nv_ofuncs(ptimer)->init(nv_object(ptimer));

	kfree(state);
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
deleted file mode 100644
index 625f80d53dc2..000000000000
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ /dev/null
@@ -1,353 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drm.h"
27#include "nouveau_bios.h"
28#include "nouveau_pm.h"
29#include "dispnv04/hw.h"
30
31#include <subdev/bios/pll.h>
32#include <subdev/clock.h>
33#include <subdev/timer.h>
34
35#include <engine/fifo.h>
36
37#define min2(a,b) ((a) < (b) ? (a) : (b))
38
39static u32
40read_pll_1(struct drm_device *dev, u32 reg)
41{
42 struct nouveau_device *device = nouveau_dev(dev);
43 u32 ctrl = nv_rd32(device, reg + 0x00);
44 int P = (ctrl & 0x00070000) >> 16;
45 int N = (ctrl & 0x0000ff00) >> 8;
46 int M = (ctrl & 0x000000ff) >> 0;
47 u32 ref = 27000, clk = 0;
48
49 if (ctrl & 0x80000000)
50 clk = ref * N / M;
51
52 return clk >> P;
53}
54
/* Read back a two-stage PLL: control word at +0, coefficients at +4.
 * Returns 0 when the PLL is disabled or a divider is zero; the second
 * stage is only applied when the control word selects it. */
static u32
read_pll_2(struct drm_device *dev, u32 reg)
{
	struct nouveau_device *device = nouveau_dev(dev);
	u32 ctrl = nv_rd32(device, reg + 0x00);
	u32 coef = nv_rd32(device, reg + 0x04);
	int N2 = (coef & 0xff000000) >> 24;
	int M2 = (coef & 0x00ff0000) >> 16;
	int N1 = (coef & 0x0000ff00) >> 8;
	int M1 = (coef & 0x000000ff) >> 0;
	int P = (ctrl & 0x00070000) >> 16;
	u32 ref = 27000, clk = 0;

	if ((ctrl & 0x80000000) && M1) {
		clk = ref * N1 / M1;
		/* apply the second stage only when enabled */
		if ((ctrl & 0x40000100) == 0x40000000) {
			if (M2)
				clk = clk * N2 / M2;
			else
				clk = 0;
		}
	}

	return clk >> P;
}
80
81static u32
82read_clk(struct drm_device *dev, u32 src)
83{
84 switch (src) {
85 case 3:
86 return read_pll_2(dev, 0x004000);
87 case 2:
88 return read_pll_1(dev, 0x004008);
89 default:
90 break;
91 }
92
93 return 0;
94}
95
/* Read back the current core/shader/memory clocks; 0x00c040 holds the
 * source selectors for the engine clocks.  Always returns 0. */
int
nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	struct nouveau_device *device = nouveau_dev(dev);
	u32 ctrl = nv_rd32(device, 0x00c040);

	perflvl->core = read_clk(dev, (ctrl & 0x00000003) >> 0);
	perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4);
	perflvl->memory = read_pll_2(dev, 0x4020);
	return 0;
}
107
108struct nv40_pm_state {
109 u32 ctrl;
110 u32 npll_ctrl;
111 u32 npll_coef;
112 u32 spll;
113 u32 mpll_ctrl;
114 u32 mpll_coef;
115};
116
/* Compute PLL coefficients for 'clk' using the vbios limits of 'reg'.
 * When VCO1 alone can reach the target the second stage is disabled and
 * N2/M2 come back as 1:1.  Callers that can only program a single stage
 * pass NULL for N2/M2.  Returns 0 on success, -ERANGE when pll_calc()
 * fails (it signals failure by returning 0). */
static int
nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
	      u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_bios *bios = nouveau_bios(device);
	struct nouveau_clock *pclk = nouveau_clock(device);
	struct nouveau_pll_vals coef;
	int ret;

	ret = nvbios_pll_parse(bios, reg, pll);
	if (ret)
		return ret;

	/* single-stage operation if vco1 alone covers the target */
	if (clk < pll->vco1.max_freq)
		pll->vco2.max_freq = 0;

	ret = pclk->pll_calc(pclk, pll, clk, &coef);
	if (ret == 0)
		return -ERANGE;

	*N1 = coef.N1;
	*M1 = coef.M1;
	if (N2 && M2) {
		if (pll->vco2.max_freq) {
			*N2 = coef.N2;
			*M2 = coef.M2;
		} else {
			*N2 = 1;
			*M2 = 1;
		}
	}
	*log2P = coef.log2P;
	return 0;
}
152
/* Pre-compute all register values for an nv40 reclock without touching
 * hardware.  Returns state for nv40_pm_clocks_set(), or an ERR_PTR.
 * Note info is kmalloc'd: mpll_coef may be left uninitialized when no
 * memory clock is requested, but nv40_pm_clocks_set() only reads it
 * after checking mpll_ctrl != 0. */
void *
nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	struct nv40_pm_state *info;
	struct nvbios_pll pll;
	int N1, N2, M1, M2, log2P;
	int ret;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	/* core/geometric clock */
	ret = nv40_calc_pll(dev, 0x004000, &pll, perflvl->core,
			    &N1, &M1, &N2, &M2, &log2P);
	if (ret < 0)
		goto out;

	/* N2 == M2 (1:1) means the second stage is effectively unused,
	 * so program single-stage mode */
	if (N2 == M2) {
		info->npll_ctrl = 0x80000100 | (log2P << 16);
		info->npll_coef = (N1 << 8) | M1;
	} else {
		info->npll_ctrl = 0xc0000000 | (log2P << 16);
		info->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
	}

	/* use the second PLL for shader/rop clock, if it differs from core */
	if (perflvl->shader && perflvl->shader != perflvl->core) {
		ret = nv40_calc_pll(dev, 0x004008, &pll, perflvl->shader,
				    &N1, &M1, NULL, NULL, &log2P);
		if (ret < 0)
			goto out;

		info->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
		info->ctrl = 0x00000223;
	} else {
		info->spll = 0x00000000;
		info->ctrl = 0x00000333;
	}

	/* memory clock; mpll_ctrl == 0 tells clocks_set() to skip it */
	if (!perflvl->memory) {
		info->mpll_ctrl = 0x00000000;
		goto out;
	}

	ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
			    &N1, &M1, &N2, &M2, &log2P);
	if (ret < 0)
		goto out;

	info->mpll_ctrl  = 0x80000000 | (log2P << 16);
	info->mpll_ctrl |= min2(pll.bias_p + log2P, pll.max_p) << 20;
	if (N2 == M2) {
		info->mpll_ctrl |= 0x00000100;
		info->mpll_coef  = (N1 << 8) | M1;
	} else {
		info->mpll_ctrl |= 0x40000000;
		info->mpll_coef  = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
	}

out:
	if (ret < 0) {
		kfree(info);
		info = ERR_PTR(ret);
	}
	return info;
}
221
/* Poll callback for nv_wait_cb(): reports whether PGRAPH is idle.
 * NOTE(review): the two nibbles of 0x400760 are presumably FIFO get/put
 * pointers -- idle requires them to match and 0x400700 to read zero;
 * confirm against PGRAPH docs. */
static bool
nv40_pm_gr_idle(void *data)
{
	struct drm_device *dev = data;
	struct nouveau_device *device = nouveau_dev(dev);

	if ((nv_rd32(device, 0x400760) & 0x000000f0) >> 4 !=
	    (nv_rd32(device, 0x400760) & 0x0000000f))
		return false;

	if (nv_rd32(device, 0x400700))
		return false;

	return true;
}
237
/* Apply the state computed by nv40_pm_clocks_pre() and free it.
 * Engine clocks are switched with PFIFO paused and PGRAPH idle; a
 * memory reclock additionally blanks the active CRTCs (VGA SR1 bit 5)
 * and puts the RAM into self-refresh around the MPLL switch.  Returns
 * 0 on success, -EAGAIN if PGRAPH would not go idle. */
int
nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_fifo *pfifo = nouveau_fifo(device);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv40_pm_state *info = pre_state;
	unsigned long flags;
	struct bit_entry M;
	u32 crtc_mask = 0;
	u8 sr1[2];
	int i, ret = -EAGAIN;

	/* determine which CRTCs are active, fetch VGA_SR1 for each */
	for (i = 0; i < 2; i++) {
		u32 vbl = nv_rd32(device, 0x600808 + (i * 0x2000));
		u32 cnt = 0;
		do {
			/* a changing vblank counter means the CRTC is
			 * running; give up after 32 polls */
			if (vbl != nv_rd32(device, 0x600808 + (i * 0x2000))) {
				nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
				sr1[i] = nv_rd08(device, 0x0c03c5 + (i * 0x2000));
				if (!(sr1[i] & 0x20))
					crtc_mask |= (1 << i);
				break;
			}
			udelay(1);
		} while (cnt++ < 32);
	}

	/* halt and idle engines */
	pfifo->pause(pfifo, &flags);

	if (!nv_wait_cb(device, nv40_pm_gr_idle, dev))
		goto resume;

	ret = 0;

	/* set engine clocks */
	nv_mask(device, 0x00c040, 0x00000333, 0x00000000);
	nv_wr32(device, 0x004004, info->npll_coef);
	nv_mask(device, 0x004000, 0xc0070100, info->npll_ctrl);
	nv_mask(device, 0x004008, 0xc007ffff, info->spll);
	mdelay(5);
	nv_mask(device, 0x00c040, 0x00000333, info->ctrl);

	/* mpll_ctrl == 0: no memory reclock requested, we're done */
	if (!info->mpll_ctrl)
		goto resume;

	/* wait for vblank start on active crtcs, disable memory access */
	for (i = 0; i < 2; i++) {
		if (!(crtc_mask & (1 << i)))
			continue;
		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
		nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
		nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
	}

	/* prepare ram for reclocking */
	nv_wr32(device, 0x1002d4, 0x00000001); /* precharge */
	nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
	nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
	nv_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
	nv_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */

	/* change the PLL of each memory partition; later chipsets have
	 * fewer MPLL mirrors, hence the cascading (intentional)
	 * fallthroughs below */
	nv_mask(device, 0x00c040, 0x0000c000, 0x00000000);
	switch (nv_device(drm->device)->chipset) {
	case 0x40:
	case 0x45:
	case 0x41:
	case 0x42:
	case 0x47:
		nv_mask(device, 0x004044, 0xc0771100, info->mpll_ctrl);
		nv_mask(device, 0x00402c, 0xc0771100, info->mpll_ctrl);
		nv_wr32(device, 0x004048, info->mpll_coef);
		nv_wr32(device, 0x004030, info->mpll_coef);
		/* fallthrough */
	case 0x43:
	case 0x49:
	case 0x4b:
		nv_mask(device, 0x004038, 0xc0771100, info->mpll_ctrl);
		nv_wr32(device, 0x00403c, info->mpll_coef);
		/* fallthrough */
	default:
		nv_mask(device, 0x004020, 0xc0771100, info->mpll_ctrl);
		nv_wr32(device, 0x004024, info->mpll_coef);
		break;
	}
	udelay(100);
	nv_mask(device, 0x00c040, 0x0000c000, 0x0000c000);

	/* re-enable normal operation of memory controller */
	nv_wr32(device, 0x1002dc, 0x00000000);
	nv_mask(device, 0x100210, 0x80000000, 0x80000000);
	udelay(100);

	/* execute memory reset script from vbios */
	if (!bit_table(dev, 'M', &M))
		nouveau_bios_run_init_table(dev, ROM16(M.data[0]), NULL, 0);

	/* make sure we're in vblank (hopefully the same one as before), and
	 * then re-enable crtc memory access
	 */
	for (i = 0; i < 2; i++) {
		if (!(crtc_mask & (1 << i)))
			continue;
		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
		nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
		nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
	}

	/* resume engines */
resume:
	pfifo->start(pfifo, &flags);
	kfree(info);
	return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
deleted file mode 100644
index 4efc33fa73fc..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ /dev/null
@@ -1,855 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drm.h"
27#include "nouveau_bios.h"
28#include "dispnv04/hw.h"
29#include "nouveau_pm.h"
30#include "nouveau_hwsq.h"
31
32#include "nv50_display.h"
33
34#include <subdev/bios/pll.h>
35#include <subdev/clock.h>
36#include <subdev/timer.h>
37#include <subdev/fb.h>
38
39enum clk_src {
40 clk_src_crystal,
41 clk_src_href,
42 clk_src_hclk,
43 clk_src_hclkm3,
44 clk_src_hclkm3d2,
45 clk_src_host,
46 clk_src_nvclk,
47 clk_src_sclk,
48 clk_src_mclk,
49 clk_src_vdec,
50 clk_src_dom6
51};
52
53static u32 read_clk(struct drm_device *, enum clk_src);
54
55static u32
56read_div(struct drm_device *dev)
57{
58 struct nouveau_device *device = nouveau_dev(dev);
59 struct nouveau_drm *drm = nouveau_drm(dev);
60
61 switch (nv_device(drm->device)->chipset) {
62 case 0x50: /* it exists, but only has bit 31, not the dividers.. */
63 case 0x84:
64 case 0x86:
65 case 0x98:
66 case 0xa0:
67 return nv_rd32(device, 0x004700);
68 case 0x92:
69 case 0x94:
70 case 0x96:
71 return nv_rd32(device, 0x004800);
72 default:
73 return 0x00000000;
74 }
75}
76
77static u32
78read_pll_src(struct drm_device *dev, u32 base)
79{
80 struct nouveau_device *device = nouveau_dev(dev);
81 struct nouveau_drm *drm = nouveau_drm(dev);
82 u32 coef, ref = read_clk(dev, clk_src_crystal);
83 u32 rsel = nv_rd32(device, 0x00e18c);
84 int P, N, M, id;
85
86 switch (nv_device(drm->device)->chipset) {
87 case 0x50:
88 case 0xa0:
89 switch (base) {
90 case 0x4020:
91 case 0x4028: id = !!(rsel & 0x00000004); break;
92 case 0x4008: id = !!(rsel & 0x00000008); break;
93 case 0x4030: id = 0; break;
94 default:
95 NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
96 return 0;
97 }
98
99 coef = nv_rd32(device, 0x00e81c + (id * 0x0c));
100 ref *= (coef & 0x01000000) ? 2 : 4;
101 P = (coef & 0x00070000) >> 16;
102 N = ((coef & 0x0000ff00) >> 8) + 1;
103 M = ((coef & 0x000000ff) >> 0) + 1;
104 break;
105 case 0x84:
106 case 0x86:
107 case 0x92:
108 coef = nv_rd32(device, 0x00e81c);
109 P = (coef & 0x00070000) >> 16;
110 N = (coef & 0x0000ff00) >> 8;
111 M = (coef & 0x000000ff) >> 0;
112 break;
113 case 0x94:
114 case 0x96:
115 case 0x98:
116 rsel = nv_rd32(device, 0x00c050);
117 switch (base) {
118 case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
119 case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
120 case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
121 case 0x4030: rsel = 3; break;
122 default:
123 NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
124 return 0;
125 }
126
127 switch (rsel) {
128 case 0: id = 1; break;
129 case 1: return read_clk(dev, clk_src_crystal);
130 case 2: return read_clk(dev, clk_src_href);
131 case 3: id = 0; break;
132 }
133
134 coef = nv_rd32(device, 0x00e81c + (id * 0x28));
135 P = (nv_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
136 P += (coef & 0x00070000) >> 16;
137 N = (coef & 0x0000ff00) >> 8;
138 M = (coef & 0x000000ff) >> 0;
139 break;
140 default:
141 BUG_ON(1);
142 }
143
144 if (M)
145 return (ref * N / M) >> P;
146 return 0;
147}
148
149static u32
150read_pll_ref(struct drm_device *dev, u32 base)
151{
152 struct nouveau_device *device = nouveau_dev(dev);
153 struct nouveau_drm *drm = nouveau_drm(dev);
154 u32 src, mast = nv_rd32(device, 0x00c040);
155
156 switch (base) {
157 case 0x004028:
158 src = !!(mast & 0x00200000);
159 break;
160 case 0x004020:
161 src = !!(mast & 0x00400000);
162 break;
163 case 0x004008:
164 src = !!(mast & 0x00010000);
165 break;
166 case 0x004030:
167 src = !!(mast & 0x02000000);
168 break;
169 case 0x00e810:
170 return read_clk(dev, clk_src_crystal);
171 default:
172 NV_ERROR(drm, "bad pll 0x%06x\n", base);
173 return 0;
174 }
175
176 if (src)
177 return read_clk(dev, clk_src_href);
178 return read_pll_src(dev, base);
179}
180
181static u32
182read_pll(struct drm_device *dev, u32 base)
183{
184 struct nouveau_device *device = nouveau_dev(dev);
185 struct nouveau_drm *drm = nouveau_drm(dev);
186 u32 mast = nv_rd32(device, 0x00c040);
187 u32 ctrl = nv_rd32(device, base + 0);
188 u32 coef = nv_rd32(device, base + 4);
189 u32 ref = read_pll_ref(dev, base);
190 u32 clk = 0;
191 int N1, N2, M1, M2;
192
193 if (base == 0x004028 && (mast & 0x00100000)) {
194 /* wtf, appears to only disable post-divider on nva0 */
195 if (nv_device(drm->device)->chipset != 0xa0)
196 return read_clk(dev, clk_src_dom6);
197 }
198
199 N2 = (coef & 0xff000000) >> 24;
200 M2 = (coef & 0x00ff0000) >> 16;
201 N1 = (coef & 0x0000ff00) >> 8;
202 M1 = (coef & 0x000000ff);
203 if ((ctrl & 0x80000000) && M1) {
204 clk = ref * N1 / M1;
205 if ((ctrl & 0x40000100) == 0x40000000) {
206 if (M2)
207 clk = clk * N2 / M2;
208 else
209 clk = 0;
210 }
211 }
212
213 return clk;
214}
215
216static u32
217read_clk(struct drm_device *dev, enum clk_src src)
218{
219 struct nouveau_device *device = nouveau_dev(dev);
220 struct nouveau_drm *drm = nouveau_drm(dev);
221 u32 mast = nv_rd32(device, 0x00c040);
222 u32 P = 0;
223
224 switch (src) {
225 case clk_src_crystal:
226 return device->crystal;
227 case clk_src_href:
228 return 100000; /* PCIE reference clock */
229 case clk_src_hclk:
230 return read_clk(dev, clk_src_href) * 27778 / 10000;
231 case clk_src_hclkm3:
232 return read_clk(dev, clk_src_hclk) * 3;
233 case clk_src_hclkm3d2:
234 return read_clk(dev, clk_src_hclk) * 3 / 2;
235 case clk_src_host:
236 switch (mast & 0x30000000) {
237 case 0x00000000: return read_clk(dev, clk_src_href);
238 case 0x10000000: break;
239 case 0x20000000: /* !0x50 */
240 case 0x30000000: return read_clk(dev, clk_src_hclk);
241 }
242 break;
243 case clk_src_nvclk:
244 if (!(mast & 0x00100000))
245 P = (nv_rd32(device, 0x004028) & 0x00070000) >> 16;
246 switch (mast & 0x00000003) {
247 case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
248 case 0x00000001: return read_clk(dev, clk_src_dom6);
249 case 0x00000002: return read_pll(dev, 0x004020) >> P;
250 case 0x00000003: return read_pll(dev, 0x004028) >> P;
251 }
252 break;
253 case clk_src_sclk:
254 P = (nv_rd32(device, 0x004020) & 0x00070000) >> 16;
255 switch (mast & 0x00000030) {
256 case 0x00000000:
257 if (mast & 0x00000080)
258 return read_clk(dev, clk_src_host) >> P;
259 return read_clk(dev, clk_src_crystal) >> P;
260 case 0x00000010: break;
261 case 0x00000020: return read_pll(dev, 0x004028) >> P;
262 case 0x00000030: return read_pll(dev, 0x004020) >> P;
263 }
264 break;
265 case clk_src_mclk:
266 P = (nv_rd32(device, 0x004008) & 0x00070000) >> 16;
267 if (nv_rd32(device, 0x004008) & 0x00000200) {
268 switch (mast & 0x0000c000) {
269 case 0x00000000:
270 return read_clk(dev, clk_src_crystal) >> P;
271 case 0x00008000:
272 case 0x0000c000:
273 return read_clk(dev, clk_src_href) >> P;
274 }
275 } else {
276 return read_pll(dev, 0x004008) >> P;
277 }
278 break;
279 case clk_src_vdec:
280 P = (read_div(dev) & 0x00000700) >> 8;
281 switch (nv_device(drm->device)->chipset) {
282 case 0x84:
283 case 0x86:
284 case 0x92:
285 case 0x94:
286 case 0x96:
287 case 0xa0:
288 switch (mast & 0x00000c00) {
289 case 0x00000000:
290 if (nv_device(drm->device)->chipset == 0xa0) /* wtf?? */
291 return read_clk(dev, clk_src_nvclk) >> P;
292 return read_clk(dev, clk_src_crystal) >> P;
293 case 0x00000400:
294 return 0;
295 case 0x00000800:
296 if (mast & 0x01000000)
297 return read_pll(dev, 0x004028) >> P;
298 return read_pll(dev, 0x004030) >> P;
299 case 0x00000c00:
300 return read_clk(dev, clk_src_nvclk) >> P;
301 }
302 break;
303 case 0x98:
304 switch (mast & 0x00000c00) {
305 case 0x00000000:
306 return read_clk(dev, clk_src_nvclk) >> P;
307 case 0x00000400:
308 return 0;
309 case 0x00000800:
310 return read_clk(dev, clk_src_hclkm3d2) >> P;
311 case 0x00000c00:
312 return read_clk(dev, clk_src_mclk) >> P;
313 }
314 break;
315 }
316 break;
317 case clk_src_dom6:
318 switch (nv_device(drm->device)->chipset) {
319 case 0x50:
320 case 0xa0:
321 return read_pll(dev, 0x00e810) >> 2;
322 case 0x84:
323 case 0x86:
324 case 0x92:
325 case 0x94:
326 case 0x96:
327 case 0x98:
328 P = (read_div(dev) & 0x00000007) >> 0;
329 switch (mast & 0x0c000000) {
330 case 0x00000000: return read_clk(dev, clk_src_href);
331 case 0x04000000: break;
332 case 0x08000000: return read_clk(dev, clk_src_hclk);
333 case 0x0c000000:
334 return read_clk(dev, clk_src_hclkm3) >> P;
335 }
336 break;
337 default:
338 break;
339 }
340 default:
341 break;
342 }
343
344 NV_DEBUG(drm, "unknown clock source %d 0x%08x\n", src, mast);
345 return 0;
346}
347
348int
349nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
350{
351 struct nouveau_drm *drm = nouveau_drm(dev);
352 if (nv_device(drm->device)->chipset == 0xaa ||
353 nv_device(drm->device)->chipset == 0xac)
354 return 0;
355
356 perflvl->core = read_clk(dev, clk_src_nvclk);
357 perflvl->shader = read_clk(dev, clk_src_sclk);
358 perflvl->memory = read_clk(dev, clk_src_mclk);
359 if (nv_device(drm->device)->chipset != 0x50) {
360 perflvl->vdec = read_clk(dev, clk_src_vdec);
361 perflvl->dom6 = read_clk(dev, clk_src_dom6);
362 }
363
364 return 0;
365}
366
367struct nv50_pm_state {
368 struct nouveau_pm_level *perflvl;
369 struct hwsq_ucode eclk_hwsq;
370 struct hwsq_ucode mclk_hwsq;
371 u32 mscript;
372 u32 mmast;
373 u32 mctrl;
374 u32 mcoef;
375};
376
377static u32
378calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
379 u32 clk, int *N1, int *M1, int *log2P)
380{
381 struct nouveau_device *device = nouveau_dev(dev);
382 struct nouveau_bios *bios = nouveau_bios(device);
383 struct nouveau_clock *pclk = nouveau_clock(device);
384 struct nouveau_pll_vals coef;
385 int ret;
386
387 ret = nvbios_pll_parse(bios, reg, pll);
388 if (ret)
389 return 0;
390
391 pll->vco2.max_freq = 0;
392 pll->refclk = read_pll_ref(dev, reg);
393 if (!pll->refclk)
394 return 0;
395
396 ret = pclk->pll_calc(pclk, pll, clk, &coef);
397 if (ret == 0)
398 return 0;
399
400 *N1 = coef.N1;
401 *M1 = coef.M1;
402 *log2P = coef.log2P;
403 return ret;
404}
405
406static inline u32
407calc_div(u32 src, u32 target, int *div)
408{
409 u32 clk0 = src, clk1 = src;
410 for (*div = 0; *div <= 7; (*div)++) {
411 if (clk0 <= target) {
412 clk1 = clk0 << (*div ? 1 : 0);
413 break;
414 }
415 clk0 >>= 1;
416 }
417
418 if (target - clk0 <= clk1 - target)
419 return clk0;
420 (*div)--;
421 return clk1;
422}
423
424static inline u32
425clk_same(u32 a, u32 b)
426{
427 return ((a / 1000) == (b / 1000));
428}
429
430static void
431mclk_precharge(struct nouveau_mem_exec_func *exec)
432{
433 struct nv50_pm_state *info = exec->priv;
434 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
435
436 hwsq_wr32(hwsq, 0x1002d4, 0x00000001);
437}
438
439static void
440mclk_refresh(struct nouveau_mem_exec_func *exec)
441{
442 struct nv50_pm_state *info = exec->priv;
443 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
444
445 hwsq_wr32(hwsq, 0x1002d0, 0x00000001);
446}
447
448static void
449mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
450{
451 struct nv50_pm_state *info = exec->priv;
452 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
453
454 hwsq_wr32(hwsq, 0x100210, enable ? 0x80000000 : 0x00000000);
455}
456
457static void
458mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
459{
460 struct nv50_pm_state *info = exec->priv;
461 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
462
463 hwsq_wr32(hwsq, 0x1002dc, enable ? 0x00000001 : 0x00000000);
464}
465
466static void
467mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
468{
469 struct nv50_pm_state *info = exec->priv;
470 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
471
472 if (nsec > 1000)
473 hwsq_usec(hwsq, (nsec + 500) / 1000);
474}
475
476static u32
477mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
478{
479 struct nouveau_device *device = nouveau_dev(exec->dev);
480 if (mr <= 1)
481 return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
482 if (mr <= 3)
483 return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
484 return 0;
485}
486
487static void
488mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
489{
490 struct nouveau_device *device = nouveau_dev(exec->dev);
491 struct nouveau_fb *pfb = nouveau_fb(device);
492 struct nv50_pm_state *info = exec->priv;
493 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
494
495 if (mr <= 1) {
496 if (pfb->ram->ranks > 1)
497 hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data);
498 hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data);
499 } else
500 if (mr <= 3) {
501 if (pfb->ram->ranks > 1)
502 hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data);
503 hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data);
504 }
505}
506
507static void
508mclk_clock_set(struct nouveau_mem_exec_func *exec)
509{
510 struct nouveau_device *device = nouveau_dev(exec->dev);
511 struct nv50_pm_state *info = exec->priv;
512 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
513 u32 ctrl = nv_rd32(device, 0x004008);
514
515 info->mmast = nv_rd32(device, 0x00c040);
516 info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */
517 info->mmast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
518
519 hwsq_wr32(hwsq, 0xc040, info->mmast);
520 hwsq_wr32(hwsq, 0x4008, ctrl | 0x00000200); /* bypass MPLL */
521 if (info->mctrl & 0x80000000)
522 hwsq_wr32(hwsq, 0x400c, info->mcoef);
523 hwsq_wr32(hwsq, 0x4008, info->mctrl);
524}
525
526static void
527mclk_timing_set(struct nouveau_mem_exec_func *exec)
528{
529 struct nouveau_device *device = nouveau_dev(exec->dev);
530 struct nv50_pm_state *info = exec->priv;
531 struct nouveau_pm_level *perflvl = info->perflvl;
532 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
533 int i;
534
535 for (i = 0; i < 9; i++) {
536 u32 reg = 0x100220 + (i * 4);
537 u32 val = nv_rd32(device, reg);
538 if (val != perflvl->timing.reg[i])
539 hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]);
540 }
541}
542
543static int
544calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
545 struct nv50_pm_state *info)
546{
547 struct nouveau_drm *drm = nouveau_drm(dev);
548 struct nouveau_device *device = nouveau_dev(dev);
549 u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
550 struct nouveau_mem_exec_func exec = {
551 .dev = dev,
552 .precharge = mclk_precharge,
553 .refresh = mclk_refresh,
554 .refresh_auto = mclk_refresh_auto,
555 .refresh_self = mclk_refresh_self,
556 .wait = mclk_wait,
557 .mrg = mclk_mrg,
558 .mrs = mclk_mrs,
559 .clock_set = mclk_clock_set,
560 .timing_set = mclk_timing_set,
561 .priv = info
562 };
563 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
564 struct nvbios_pll pll;
565 int N, M, P;
566 int ret;
567
568 /* use pcie refclock if possible, otherwise use mpll */
569 info->mctrl = nv_rd32(device, 0x004008);
570 info->mctrl &= ~0x81ff0200;
571 if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) {
572 info->mctrl |= 0x00000200 | (pll.bias_p << 19);
573 } else {
574 ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P);
575 if (ret == 0)
576 return -EINVAL;
577
578 info->mctrl |= 0x80000000 | (P << 22) | (P << 16);
579 info->mctrl |= pll.bias_p << 19;
580 info->mcoef = (N << 8) | M;
581 }
582
583 /* build the ucode which will reclock the memory for us */
584 hwsq_init(hwsq);
585 if (crtc_mask) {
586 hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
587 hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
588 }
589 if (nv_device(drm->device)->chipset >= 0x92)
590 hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
591 hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
592 hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */
593
594 ret = nouveau_mem_exec(&exec, perflvl);
595 if (ret)
596 return ret;
597
598 hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
599 hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */
600 if (nv_device(drm->device)->chipset >= 0x92)
601 hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
602 hwsq_fini(hwsq);
603 return 0;
604}
605
606void *
607nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
608{
609 struct nouveau_device *device = nouveau_dev(dev);
610 struct nouveau_drm *drm = nouveau_drm(dev);
611 struct nv50_pm_state *info;
612 struct hwsq_ucode *hwsq;
613 struct nvbios_pll pll;
614 u32 out, mast, divs, ctrl;
615 int clk, ret = -EINVAL;
616 int N, M, P1, P2;
617
618 if (nv_device(drm->device)->chipset == 0xaa ||
619 nv_device(drm->device)->chipset == 0xac)
620 return ERR_PTR(-ENODEV);
621
622 info = kmalloc(sizeof(*info), GFP_KERNEL);
623 if (!info)
624 return ERR_PTR(-ENOMEM);
625 info->perflvl = perflvl;
626
627 /* memory: build hwsq ucode which we'll use to reclock memory.
628 * use pcie refclock if possible, otherwise use mpll */
629 info->mclk_hwsq.len = 0;
630 if (perflvl->memory) {
631 ret = calc_mclk(dev, perflvl, info);
632 if (ret)
633 goto error;
634 info->mscript = perflvl->memscript;
635 }
636
637 divs = read_div(dev);
638 mast = info->mmast;
639
640 /* start building HWSQ script for engine reclocking */
641 hwsq = &info->eclk_hwsq;
642 hwsq_init(hwsq);
643 hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
644 hwsq_op5f(hwsq, 0x00, 0x01); /* wait for access disabled? */
645
646 /* vdec/dom6: switch to "safe" clocks temporarily */
647 if (perflvl->vdec) {
648 mast &= ~0x00000c00;
649 divs &= ~0x00000700;
650 }
651
652 if (perflvl->dom6) {
653 mast &= ~0x0c000000;
654 divs &= ~0x00000007;
655 }
656
657 hwsq_wr32(hwsq, 0x00c040, mast);
658
659 /* vdec: avoid modifying xpll until we know exactly how the other
660 * clock domains work, i suspect at least some of them can also be
661 * tied to xpll...
662 */
663 if (perflvl->vdec) {
664 /* see how close we can get using nvclk as a source */
665 clk = calc_div(perflvl->core, perflvl->vdec, &P1);
666
667 /* see how close we can get using xpll/hclk as a source */
668 if (nv_device(drm->device)->chipset != 0x98)
669 out = read_pll(dev, 0x004030);
670 else
671 out = read_clk(dev, clk_src_hclkm3d2);
672 out = calc_div(out, perflvl->vdec, &P2);
673
674 /* select whichever gets us closest */
675 if (abs((int)perflvl->vdec - clk) <=
676 abs((int)perflvl->vdec - out)) {
677 if (nv_device(drm->device)->chipset != 0x98)
678 mast |= 0x00000c00;
679 divs |= P1 << 8;
680 } else {
681 mast |= 0x00000800;
682 divs |= P2 << 8;
683 }
684 }
685
686 /* dom6: nfi what this is, but we're limited to various combinations
687 * of the host clock frequency
688 */
689 if (perflvl->dom6) {
690 if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
691 mast |= 0x00000000;
692 } else
693 if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
694 mast |= 0x08000000;
695 } else {
696 clk = read_clk(dev, clk_src_hclk) * 3;
697 clk = calc_div(clk, perflvl->dom6, &P1);
698
699 mast |= 0x0c000000;
700 divs |= P1;
701 }
702 }
703
704 /* vdec/dom6: complete switch to new clocks */
705 switch (nv_device(drm->device)->chipset) {
706 case 0x92:
707 case 0x94:
708 case 0x96:
709 hwsq_wr32(hwsq, 0x004800, divs);
710 break;
711 default:
712 hwsq_wr32(hwsq, 0x004700, divs);
713 break;
714 }
715
716 hwsq_wr32(hwsq, 0x00c040, mast);
717
718 /* core/shader: make sure sclk/nvclk are disconnected from their
719 * PLLs (nvclk to dom6, sclk to hclk)
720 */
721 if (nv_device(drm->device)->chipset < 0x92)
722 mast = (mast & ~0x001000b0) | 0x00100080;
723 else
724 mast = (mast & ~0x000000b3) | 0x00000081;
725
726 hwsq_wr32(hwsq, 0x00c040, mast);
727
728 /* core: for the moment at least, always use nvpll */
729 clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
730 if (clk == 0)
731 goto error;
732
733 ctrl = nv_rd32(device, 0x004028) & ~0xc03f0100;
734 mast &= ~0x00100000;
735 mast |= 3;
736
737 hwsq_wr32(hwsq, 0x004028, 0x80000000 | (P1 << 19) | (P1 << 16) | ctrl);
738 hwsq_wr32(hwsq, 0x00402c, (N << 8) | M);
739
740 /* shader: tie to nvclk if possible, otherwise use spll. have to be
741 * very careful that the shader clock is at least twice the core, or
742 * some chipsets will be very unhappy. i expect most or all of these
743 * cases will be handled by tying to nvclk, but it's possible there's
744 * corners
745 */
746 ctrl = nv_rd32(device, 0x004020) & ~0xc03f0100;
747
748 if (P1-- && perflvl->shader == (perflvl->core << 1)) {
749 hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
750 hwsq_wr32(hwsq, 0x00c040, 0x00000020 | mast);
751 } else {
752 clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
753 if (clk == 0)
754 goto error;
755 ctrl |= 0x80000000;
756
757 hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
758 hwsq_wr32(hwsq, 0x004024, (N << 8) | M);
759 hwsq_wr32(hwsq, 0x00c040, 0x00000030 | mast);
760 }
761
762 hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
763 hwsq_op5f(hwsq, 0x00, 0x00); /* wait for access enabled? */
764 hwsq_fini(hwsq);
765
766 return info;
767error:
768 kfree(info);
769 return ERR_PTR(ret);
770}
771
772static int
773prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
774{
775 struct nouveau_device *device = nouveau_dev(dev);
776 struct nouveau_drm *drm = nouveau_drm(dev);
777 u32 hwsq_data, hwsq_kick;
778 int i;
779
780 if (nv_device(drm->device)->chipset < 0x94) {
781 hwsq_data = 0x001400;
782 hwsq_kick = 0x00000003;
783 } else {
784 hwsq_data = 0x080000;
785 hwsq_kick = 0x00000001;
786 }
787 /* upload hwsq ucode */
788 nv_mask(device, 0x001098, 0x00000008, 0x00000000);
789 nv_wr32(device, 0x001304, 0x00000000);
790 if (nv_device(drm->device)->chipset >= 0x92)
791 nv_wr32(device, 0x001318, 0x00000000);
792 for (i = 0; i < hwsq->len / 4; i++)
793 nv_wr32(device, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
794 nv_mask(device, 0x001098, 0x00000018, 0x00000018);
795
796 /* launch, and wait for completion */
797 nv_wr32(device, 0x00130c, hwsq_kick);
798 if (!nv_wait(device, 0x001308, 0x00000100, 0x00000000)) {
799 NV_ERROR(drm, "hwsq ucode exec timed out\n");
800 NV_ERROR(drm, "0x001308: 0x%08x\n", nv_rd32(device, 0x001308));
801 for (i = 0; i < hwsq->len / 4; i++) {
802 NV_ERROR(drm, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
803 nv_rd32(device, 0x001400 + (i * 4)));
804 }
805
806 return -EIO;
807 }
808
809 return 0;
810}
811
812int
813nv50_pm_clocks_set(struct drm_device *dev, void *data)
814{
815 struct nouveau_device *device = nouveau_dev(dev);
816 struct nv50_pm_state *info = data;
817 struct bit_entry M;
818 int ret = -EBUSY;
819
820 /* halt and idle execution engines */
821 nv_mask(device, 0x002504, 0x00000001, 0x00000001);
822 if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010))
823 goto resume;
824 if (!nv_wait(device, 0x00251c, 0x0000003f, 0x0000003f))
825 goto resume;
826
827 /* program memory clock, if necessary - must come before engine clock
828 * reprogramming due to how we construct the hwsq scripts in pre()
829 */
830#define nouveau_bios_init_exec(a,b) nouveau_bios_run_init_table((a), (b), NULL, 0)
831 if (info->mclk_hwsq.len) {
832 /* execute some scripts that do ??? from the vbios.. */
833 if (!bit_table(dev, 'M', &M) && M.version == 1) {
834 if (M.length >= 6)
835 nouveau_bios_init_exec(dev, ROM16(M.data[5]));
836 if (M.length >= 8)
837 nouveau_bios_init_exec(dev, ROM16(M.data[7]));
838 if (M.length >= 10)
839 nouveau_bios_init_exec(dev, ROM16(M.data[9]));
840 nouveau_bios_init_exec(dev, info->mscript);
841 }
842
843 ret = prog_hwsq(dev, &info->mclk_hwsq);
844 if (ret)
845 goto resume;
846 }
847
848 /* program engine clocks */
849 ret = prog_hwsq(dev, &info->eclk_hwsq);
850
851resume:
852 nv_mask(device, 0x002504, 0x00000001, 0x00000000);
853 kfree(info);
854 return ret;
855}
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
deleted file mode 100644
index 0d0ed597fea8..000000000000
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ /dev/null
@@ -1,624 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drm.h"
27#include "nouveau_bios.h"
28#include "nouveau_pm.h"
29
30#include <subdev/bios/pll.h>
31#include <subdev/bios.h>
32#include <subdev/clock.h>
33#include <subdev/timer.h>
34#include <subdev/fb.h>
35
36static u32 read_clk(struct drm_device *, int, bool);
37static u32 read_pll(struct drm_device *, int, u32);
38
39static u32
40read_vco(struct drm_device *dev, int clk)
41{
42 struct nouveau_device *device = nouveau_dev(dev);
43 u32 sctl = nv_rd32(device, 0x4120 + (clk * 4));
44 if ((sctl & 0x00000030) != 0x00000030)
45 return read_pll(dev, 0x41, 0x00e820);
46 return read_pll(dev, 0x42, 0x00e8a0);
47}
48
49static u32
50read_clk(struct drm_device *dev, int clk, bool ignore_en)
51{
52 struct nouveau_device *device = nouveau_dev(dev);
53 struct nouveau_drm *drm = nouveau_drm(dev);
54 u32 sctl, sdiv, sclk;
55
56 /* refclk for the 0xe8xx plls is a fixed frequency */
57 if (clk >= 0x40) {
58 if (nv_device(drm->device)->chipset == 0xaf) {
59 /* no joke.. seriously.. sigh.. */
60 return nv_rd32(device, 0x00471c) * 1000;
61 }
62
63 return device->crystal;
64 }
65
66 sctl = nv_rd32(device, 0x4120 + (clk * 4));
67 if (!ignore_en && !(sctl & 0x00000100))
68 return 0;
69
70 switch (sctl & 0x00003000) {
71 case 0x00000000:
72 return device->crystal;
73 case 0x00002000:
74 if (sctl & 0x00000040)
75 return 108000;
76 return 100000;
77 case 0x00003000:
78 sclk = read_vco(dev, clk);
79 sdiv = ((sctl & 0x003f0000) >> 16) + 2;
80 return (sclk * 2) / sdiv;
81 default:
82 return 0;
83 }
84}
85
86static u32
87read_pll(struct drm_device *dev, int clk, u32 pll)
88{
89 struct nouveau_device *device = nouveau_dev(dev);
90 u32 ctrl = nv_rd32(device, pll + 0);
91 u32 sclk = 0, P = 1, N = 1, M = 1;
92
93 if (!(ctrl & 0x00000008)) {
94 if (ctrl & 0x00000001) {
95 u32 coef = nv_rd32(device, pll + 4);
96 M = (coef & 0x000000ff) >> 0;
97 N = (coef & 0x0000ff00) >> 8;
98 P = (coef & 0x003f0000) >> 16;
99
100 /* no post-divider on these.. */
101 if ((pll & 0x00ff00) == 0x00e800)
102 P = 1;
103
104 sclk = read_clk(dev, 0x00 + clk, false);
105 }
106 } else {
107 sclk = read_clk(dev, 0x10 + clk, false);
108 }
109
110 if (M * P)
111 return sclk * N / (M * P);
112 return 0;
113}
114
115struct creg {
116 u32 clk;
117 u32 pll;
118};
119
120static int
121calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
122{
123 struct nouveau_drm *drm = nouveau_drm(dev);
124 struct nouveau_device *device = nouveau_dev(dev);
125 struct nouveau_bios *bios = nouveau_bios(device);
126 struct nvbios_pll limits;
127 u32 oclk, sclk, sdiv;
128 int P, N, M, diff;
129 int ret;
130
131 reg->pll = 0;
132 reg->clk = 0;
133 if (!khz) {
134 NV_DEBUG(drm, "no clock for 0x%04x/0x%02x\n", pll, clk);
135 return 0;
136 }
137
138 switch (khz) {
139 case 27000:
140 reg->clk = 0x00000100;
141 return khz;
142 case 100000:
143 reg->clk = 0x00002100;
144 return khz;
145 case 108000:
146 reg->clk = 0x00002140;
147 return khz;
148 default:
149 sclk = read_vco(dev, clk);
150 sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
151 /* if the clock has a PLL attached, and we can get a within
152 * [-2, 3) MHz of a divider, we'll disable the PLL and use
153 * the divider instead.
154 *
155 * divider can go as low as 2, limited here because NVIDIA
156 * and the VBIOS on my NVA8 seem to prefer using the PLL
157 * for 810MHz - is there a good reason?
158 */
159 if (sdiv > 4) {
160 oclk = (sclk * 2) / sdiv;
161 diff = khz - oclk;
162 if (!pll || (diff >= -2000 && diff < 3000)) {
163 reg->clk = (((sdiv - 2) << 16) | 0x00003100);
164 return oclk;
165 }
166 }
167
168 if (!pll) {
169 NV_ERROR(drm, "bad freq %02x: %d %d\n", clk, khz, sclk);
170 return -ERANGE;
171 }
172
173 break;
174 }
175
176 ret = nvbios_pll_parse(bios, pll, &limits);
177 if (ret)
178 return ret;
179
180 limits.refclk = read_clk(dev, clk - 0x10, true);
181 if (!limits.refclk)
182 return -EINVAL;
183
184 ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
185 if (ret >= 0) {
186 reg->clk = nv_rd32(device, 0x4120 + (clk * 4));
187 reg->pll = (P << 16) | (N << 8) | M;
188 }
189
190 return ret;
191}
192
193static void
194prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
195{
196 struct nouveau_device *device = nouveau_dev(dev);
197 struct nouveau_drm *drm = nouveau_drm(dev);
198 const u32 src0 = 0x004120 + (clk * 4);
199 const u32 src1 = 0x004160 + (clk * 4);
200 const u32 ctrl = pll + 0;
201 const u32 coef = pll + 4;
202
203 if (!reg->clk && !reg->pll) {
204 NV_DEBUG(drm, "no clock for %02x\n", clk);
205 return;
206 }
207
208 if (reg->pll) {
209 nv_mask(device, src0, 0x00000101, 0x00000101);
210 nv_wr32(device, coef, reg->pll);
211 nv_mask(device, ctrl, 0x00000015, 0x00000015);
212 nv_mask(device, ctrl, 0x00000010, 0x00000000);
213 nv_wait(device, ctrl, 0x00020000, 0x00020000);
214 nv_mask(device, ctrl, 0x00000010, 0x00000010);
215 nv_mask(device, ctrl, 0x00000008, 0x00000000);
216 nv_mask(device, src1, 0x00000100, 0x00000000);
217 nv_mask(device, src1, 0x00000001, 0x00000000);
218 } else {
219 nv_mask(device, src1, 0x003f3141, 0x00000101 | reg->clk);
220 nv_mask(device, ctrl, 0x00000018, 0x00000018);
221 udelay(20);
222 nv_mask(device, ctrl, 0x00000001, 0x00000000);
223 nv_mask(device, src0, 0x00000100, 0x00000000);
224 nv_mask(device, src0, 0x00000001, 0x00000000);
225 }
226}
227
228static void
229prog_clk(struct drm_device *dev, int clk, struct creg *reg)
230{
231 struct nouveau_device *device = nouveau_dev(dev);
232 struct nouveau_drm *drm = nouveau_drm(dev);
233
234 if (!reg->clk) {
235 NV_DEBUG(drm, "no clock for %02x\n", clk);
236 return;
237 }
238
239 nv_mask(device, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
240}
241
/*
 * Read the clock frequencies currently programmed in hardware into
 * @perflvl.  read_pll()/read_clk() are defined earlier in this file
 * (outside this chunk).  Always returns 0.
 */
int
nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
        perflvl->core = read_pll(dev, 0x00, 0x4200);
        perflvl->shader = read_pll(dev, 0x01, 0x4220);
        perflvl->memory = read_pll(dev, 0x02, 0x4000);
        perflvl->unka0 = read_clk(dev, 0x20, false);
        perflvl->vdec = read_clk(dev, 0x21, false);
        perflvl->daemon = read_clk(dev, 0x25, false);
        perflvl->copy = perflvl->core; /* copy engine tracks the core clock */
        return 0;
}
254
/*
 * Pre-calculated reclock state, built by nva3_pm_clocks_pre() and
 * consumed (then kfree'd) by nva3_pm_clocks_set().
 */
struct nva3_pm_state {
        struct nouveau_pm_level *perflvl;       /* target level (not owned) */

        struct creg nclk;       /* core clock */
        struct creg sclk;       /* shader clock */
        struct creg vdec;       /* video decoder clock */
        struct creg unka0;      /* unknown domain 0x20 */

        struct creg mclk;       /* memory clock */
        u8 *rammap;             /* BIOS rammap entry, NULL if unusable */
        u8 rammap_ver;
        u8 rammap_len;
        u8 *ramcfg;             /* BIOS ramcfg entry, NULL if unusable */
        u8 ramcfg_len;
        u32 r004018;            /* extra bits OR'd into 0x004018 in prog_mem() */
        u32 r100760;            /* value masked into 0x1007x0 in mclk_clock_set() */
};
272
/*
 * Pre-calculate everything needed to switch to @perflvl without
 * touching the hardware.  Returns a kzalloc'd nva3_pm_state (freed
 * later by nva3_pm_clocks_set()) or an ERR_PTR on failure.
 */
void *
nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
        struct nva3_pm_state *info;
        u8 ramcfg_cnt;
        int ret;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return ERR_PTR(-ENOMEM);

        ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk);
        if (ret < 0)
                goto out;

        ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk);
        if (ret < 0)
                goto out;

        ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk);
        if (ret < 0)
                goto out;

        ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0);
        if (ret < 0)
                goto out;

        ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec);
        if (ret < 0)
                goto out;

        /* BIOS table lookups are best-effort: on a version/length
         * mismatch the pointer is cleared and memory reclocking simply
         * skips the corresponding steps */
        info->rammap = nouveau_perf_rammap(dev, perflvl->memory,
                                           &info->rammap_ver,
                                           &info->rammap_len,
                                           &ramcfg_cnt, &info->ramcfg_len);
        if (info->rammap_ver != 0x10 || info->rammap_len < 5)
                info->rammap = NULL;

        /* NOTE(review): rammap_ver is reused to receive the ramcfg
         * version here -- both must be 0x10, so this looks intentional,
         * but worth confirming against nouveau_perf_ramcfg() */
        info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory,
                                           &info->rammap_ver,
                                           &info->ramcfg_len);
        if (info->rammap_ver != 0x10)
                info->ramcfg = NULL;

        info->perflvl = perflvl;
out:
        if (ret < 0) {
                kfree(info);
                info = ERR_PTR(ret);
        }
        return info;
}
325
326static bool
327nva3_pm_grcp_idle(void *data)
328{
329 struct drm_device *dev = data;
330 struct nouveau_device *device = nouveau_dev(dev);
331
332 if (!(nv_rd32(device, 0x400304) & 0x00000001))
333 return true;
334 if (nv_rd32(device, 0x400308) == 0x0050001c)
335 return true;
336 return false;
337}
338
/* mem_exec callback: issue a precharge command to the memory controller */
static void
mclk_precharge(struct nouveau_mem_exec_func *exec)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        nv_wr32(device, 0x1002d4, 0x00000001);
}
345
/* mem_exec callback: trigger a manual refresh cycle */
static void
mclk_refresh(struct nouveau_mem_exec_func *exec)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        nv_wr32(device, 0x1002d0, 0x00000001);
}
352
/* mem_exec callback: enable/disable automatic refresh */
static void
mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        nv_wr32(device, 0x100210, enable ? 0x80000000 : 0x00000000);
}
359
/* mem_exec callback: enter/leave memory self-refresh */
static void
mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        nv_wr32(device, 0x1002dc, enable ? 0x00000001 : 0x00000000);
}
366
367static void
368mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
369{
370 struct nouveau_device *device = nouveau_dev(exec->dev);
371 volatile u32 post = nv_rd32(device, 0); (void)post;
372 udelay((nsec + 500) / 1000);
373}
374
/*
 * mem_exec callback: read memory mode register @mr.  MR0/MR1 live at
 * 0x1002c0+, MR2/MR3 at 0x1002e0+; anything else reads back as 0.
 */
static u32
mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        if (mr <= 1)
                return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
        if (mr <= 3)
                return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
        return 0;
}
385
/*
 * mem_exec callback: write memory mode register @mr.  When more than
 * one rank is present, the second-rank shadow (+0x08) is written
 * before the primary register.  Registers beyond MR3 are ignored.
 */
static void
mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        struct nouveau_fb *pfb = nouveau_fb(device);
        if (mr <= 1) {
                if (pfb->ram->ranks > 1)
                        nv_wr32(device, 0x1002c8 + ((mr - 0) * 4), data);
                nv_wr32(device, 0x1002c0 + ((mr - 0) * 4), data);
        } else
        if (mr <= 3) {
                if (pfb->ram->ranks > 1)
                        nv_wr32(device, 0x1002e8 + ((mr - 2) * 4), data);
                nv_wr32(device, 0x1002e0 + ((mr - 2) * 4), data);
        }
}
402
/*
 * mem_exec callback: switch the memory clock source to the
 * pre-calculated PLL or divider settings.  The sequence and register
 * meanings are reverse-engineered; exact write order matters.
 */
static void
mclk_clock_set(struct nouveau_mem_exec_func *exec)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        struct nva3_pm_state *info = exec->priv;
        u32 ctrl;

        ctrl = nv_rd32(device, 0x004000);
        if (!(ctrl & 0x00000008) && info->mclk.pll) {
                /* not in bypass and a PLL target exists: enter bypass,
                 * reprogram and restart the PLL */
                nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
                nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
                nv_wr32(device, 0x004018, 0x00001000);
                nv_wr32(device, 0x004000, (ctrl &= ~0x00000001));
                nv_wr32(device, 0x004004, info->mclk.pll);
                nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
                udelay(64);     /* presumably PLL lock time -- unverified */
                nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
                udelay(20);
        } else
        if (!info->mclk.pll) {
                /* divider target: program the source and stay bypassed */
                nv_mask(device, 0x004168, 0x003f3040, info->mclk.clk);
                nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
                nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
                nv_wr32(device, 0x004018, 0x0000d000 | info->r004018);
        }

        if (info->rammap) {
                if (info->ramcfg && (info->rammap[4] & 0x08)) {
                        /* assemble magic values from the BIOS ramcfg
                         * entry -- layout only partially understood */
                        u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) |
                                     info->ramcfg[5];
                        u32 unk5a4 = ROM16(info->ramcfg[7]);
                        u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 |
                                     (info->ramcfg[3] & 0x0f) << 16 |
                                     (info->ramcfg[9] & 0x0f) |
                                     0x80000000;
                        nv_wr32(device, 0x1005a0, unk5a0);
                        nv_wr32(device, 0x1005a4, unk5a4);
                        nv_wr32(device, 0x10f804, unk804);
                        nv_mask(device, 0x10053c, 0x00001000, 0x00000000);
                } else {
                        nv_mask(device, 0x10053c, 0x00001000, 0x00001000);
                        nv_mask(device, 0x10f804, 0x80000000, 0x00000000);
                        nv_mask(device, 0x100760, 0x22222222, info->r100760);
                        nv_mask(device, 0x1007a0, 0x22222222, info->r100760);
                        nv_mask(device, 0x1007e0, 0x22222222, info->r100760);
                }
        }

        if (info->mclk.pll) {
                /* leave bypass now that the PLL is running */
                nv_mask(device, 0x1110e0, 0x00088000, 0x00011000);
                nv_wr32(device, 0x004000, (ctrl &= ~0x00000008));
        }
}
456
/*
 * mem_exec callback: write the 9 memory timing registers for the
 * target perflvl, plus assorted ramcfg-dependent tweaks whose exact
 * meaning is unknown (register names kept as raw offsets).
 */
static void
mclk_timing_set(struct nouveau_mem_exec_func *exec)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        struct nva3_pm_state *info = exec->priv;
        struct nouveau_pm_level *perflvl = info->perflvl;
        int i;

        for (i = 0; i < 9; i++)
                nv_wr32(device, 0x100220 + (i * 4), perflvl->timing.reg[i]);

        if (info->ramcfg) {
                u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
                nv_mask(device, 0x100200, 0x00001000, data);
        }

        if (info->ramcfg) {
                /* apply ramcfg flag bits to the 0x1007xx unknowns */
                u32 unk714 = nv_rd32(device, 0x100714) & ~0xf0000010;
                u32 unk718 = nv_rd32(device, 0x100718) & ~0x00000100;
                u32 unk71c = nv_rd32(device, 0x10071c) & ~0x00000100;
                if ( (info->ramcfg[2] & 0x20))
                        unk714 |= 0xf0000000;
                if (!(info->ramcfg[2] & 0x04))
                        unk714 |= 0x00000010;
                nv_wr32(device, 0x100714, unk714);

                if (info->ramcfg[2] & 0x01)
                        unk71c |= 0x00000100;
                nv_wr32(device, 0x10071c, unk71c);

                if (info->ramcfg[2] & 0x02)
                        unk718 |= 0x00000100;
                nv_wr32(device, 0x100718, unk718);

                if (info->ramcfg[2] & 0x10)
                        nv_wr32(device, 0x111100, 0x48000000); /*XXX*/
        }
}
495
496static void
497prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
498{
499 struct nouveau_device *device = nouveau_dev(dev);
500 struct nouveau_mem_exec_func exec = {
501 .dev = dev,
502 .precharge = mclk_precharge,
503 .refresh = mclk_refresh,
504 .refresh_auto = mclk_refresh_auto,
505 .refresh_self = mclk_refresh_self,
506 .wait = mclk_wait,
507 .mrg = mclk_mrg,
508 .mrs = mclk_mrs,
509 .clock_set = mclk_clock_set,
510 .timing_set = mclk_timing_set,
511 .priv = info
512 };
513 u32 ctrl;
514
515 /* XXX: where the fuck does 750MHz come from? */
516 if (info->perflvl->memory <= 750000) {
517 info->r004018 = 0x10000000;
518 info->r100760 = 0x22222222;
519 }
520
521 ctrl = nv_rd32(device, 0x004000);
522 if (ctrl & 0x00000008) {
523 if (info->mclk.pll) {
524 nv_mask(device, 0x004128, 0x00000101, 0x00000101);
525 nv_wr32(device, 0x004004, info->mclk.pll);
526 nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
527 nv_wr32(device, 0x004000, (ctrl &= 0xffffffef));
528 nv_wait(device, 0x004000, 0x00020000, 0x00020000);
529 nv_wr32(device, 0x004000, (ctrl |= 0x00000010));
530 nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
531 nv_wr32(device, 0x004000, (ctrl |= 0x00000004));
532 }
533 } else {
534 u32 ssel = 0x00000101;
535 if (info->mclk.clk)
536 ssel |= info->mclk.clk;
537 else
538 ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
539 nv_mask(device, 0x004168, 0x003f3141, ctrl);
540 }
541
542 if (info->ramcfg) {
543 if (info->ramcfg[2] & 0x10) {
544 nv_mask(device, 0x111104, 0x00000600, 0x00000000);
545 } else {
546 nv_mask(device, 0x111100, 0x40000000, 0x40000000);
547 nv_mask(device, 0x111104, 0x00000180, 0x00000000);
548 }
549 }
550 if (info->rammap && !(info->rammap[4] & 0x02))
551 nv_mask(device, 0x100200, 0x00000800, 0x00000000);
552 nv_wr32(device, 0x611200, 0x00003300);
553 if (!(info->ramcfg[2] & 0x10))
554 nv_wr32(device, 0x111100, 0x4c020000); /*XXX*/
555
556 nouveau_mem_exec(&exec, info->perflvl);
557
558 nv_wr32(device, 0x611200, 0x00003330);
559 if (info->rammap && (info->rammap[4] & 0x02))
560 nv_mask(device, 0x100200, 0x00000800, 0x00000800);
561 if (info->ramcfg) {
562 if (info->ramcfg[2] & 0x10) {
563 nv_mask(device, 0x111104, 0x00000180, 0x00000180);
564 nv_mask(device, 0x111100, 0x40000000, 0x00000000);
565 } else {
566 nv_mask(device, 0x111104, 0x00000600, 0x00000600);
567 }
568 }
569
570 if (info->mclk.pll) {
571 nv_mask(device, 0x004168, 0x00000001, 0x00000000);
572 nv_mask(device, 0x004168, 0x00000100, 0x00000000);
573 } else {
574 nv_mask(device, 0x004000, 0x00000001, 0x00000000);
575 nv_mask(device, 0x004128, 0x00000001, 0x00000000);
576 nv_mask(device, 0x004128, 0x00000100, 0x00000000);
577 }
578}
579
/*
 * Apply a state previously built by nva3_pm_clocks_pre().  The GPU is
 * quiesced first (ctxprog parked, PFIFO frozen); on any failure to do
 * so we bail out with -EAGAIN.  Always consumes (kfrees) @pre_state.
 */
int
nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
        struct nouveau_device *device = nouveau_dev(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nva3_pm_state *info = pre_state;
        int ret = -EAGAIN;

        /* prevent any new grctx switches from starting */
        nv_wr32(device, 0x400324, 0x00000000);
        nv_wr32(device, 0x400328, 0x0050001c); /* wait flag 0x1c */
        /* wait for any pending grctx switches to complete */
        if (!nv_wait_cb(device, nva3_pm_grcp_idle, dev)) {
                NV_ERROR(drm, "pm: ctxprog didn't go idle\n");
                goto cleanup;
        }
        /* freeze PFIFO */
        nv_mask(device, 0x002504, 0x00000001, 0x00000001);
        if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010)) {
                NV_ERROR(drm, "pm: fifo didn't go idle\n");
                goto cleanup;
        }

        prog_pll(dev, 0x00, 0x004200, &info->nclk);
        prog_pll(dev, 0x01, 0x004220, &info->sclk);
        prog_clk(dev, 0x20, &info->unka0);
        prog_clk(dev, 0x21, &info->vdec);

        /* memory reclock only when something was actually calculated */
        if (info->mclk.clk || info->mclk.pll)
                prog_mem(dev, info);

        ret = 0;

cleanup:
        /* unfreeze PFIFO */
        nv_mask(device, 0x002504, 0x00000001, 0x00000000);
        /* restore ctxprog to normal */
        nv_wr32(device, 0x400324, 0x00000000);
        nv_wr32(device, 0x400328, 0x0070009c); /* set flag 0x1c */
        /* unblock it if necessary */
        if (nv_rd32(device, 0x400308) == 0x0050001c)
                nv_mask(device, 0x400824, 0x10000000, 0x10000000);
        kfree(info);
        return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
deleted file mode 100644
index 3b7041cb013f..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ /dev/null
@@ -1,599 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nouveau_drm.h"
26#include "nouveau_bios.h"
27#include "nouveau_pm.h"
28
29#include <subdev/bios/pll.h>
30#include <subdev/bios.h>
31#include <subdev/clock.h>
32#include <subdev/timer.h>
33#include <subdev/fb.h>
34
35static u32 read_div(struct drm_device *, int, u32, u32);
36static u32 read_pll(struct drm_device *, u32);
37
38static u32
39read_vco(struct drm_device *dev, u32 dsrc)
40{
41 struct nouveau_device *device = nouveau_dev(dev);
42 u32 ssrc = nv_rd32(device, dsrc);
43 if (!(ssrc & 0x00000100))
44 return read_pll(dev, 0x00e800);
45 return read_pll(dev, 0x00e820);
46}
47
/*
 * Compute a PLL's output frequency from its N/M/P coefficients and
 * its input clock, which depends on which PLL block it belongs to:
 *  - 0x00exxx: fixed 27000 input, P forced to 1
 *  - 0x137xxx: per-clock divider source (0x137120/0x137140)
 *  - 0x132000: fed by PLL 0x132020, itself fed by 0x137320/0x137330
 * Returns 0 when the PLL is disabled or the address is unrecognized.
 */
static u32
read_pll(struct drm_device *dev, u32 pll)
{
        struct nouveau_device *device = nouveau_dev(dev);
        u32 ctrl = nv_rd32(device, pll + 0);
        u32 coef = nv_rd32(device, pll + 4);
        u32 P = (coef & 0x003f0000) >> 16;
        u32 N = (coef & 0x0000ff00) >> 8;
        u32 M = (coef & 0x000000ff) >> 0;
        u32 sclk, doff;

        /* PLL disabled */
        if (!(ctrl & 0x00000001))
                return 0;

        switch (pll & 0xfff000) {
        case 0x00e000:
                sclk = 27000;
                P = 1;
                break;
        case 0x137000:
                doff = (pll - 0x137000) / 0x20;
                sclk = read_div(dev, doff, 0x137120, 0x137140);
                break;
        case 0x132000:
                switch (pll) {
                case 0x132000:
                        sclk = read_pll(dev, 0x132020);
                        break;
                case 0x132020:
                        sclk = read_div(dev, 0, 0x137320, 0x137330);
                        break;
                default:
                        return 0;
                }
                break;
        default:
                return 0;
        }

        return sclk * N / M / P;
}
89
/*
 * Compute the frequency of divider-based clock source @doff.  The
 * source select (dsrc) picks between fixed references (27000/108000,
 * 100000) and the VCO; when bit 31 of the control reg (dctl) is set
 * the VCO is additionally divided by (bits 0-5) + 2, doubled.
 */
static u32
read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
{
        struct nouveau_device *device = nouveau_dev(dev);
        u32 ssrc = nv_rd32(device, dsrc + (doff * 4));
        u32 sctl = nv_rd32(device, dctl + (doff * 4));

        switch (ssrc & 0x00000003) {
        case 0:
                if ((ssrc & 0x00030000) != 0x00030000)
                        return 27000;
                return 108000;
        case 2:
                return 100000;
        case 3:
                if (sctl & 0x80000000) {
                        u32 sclk = read_vco(dev, dsrc + (doff * 4));
                        u32 sdiv = (sctl & 0x0000003f) + 2;
                        return (sclk * 2) / sdiv;
                }

                return read_vco(dev, dsrc + (doff * 4));
        default:
                /* source select 1: meaning unknown, report 0 */
                return 0;
        }
}
116
117static u32
118read_mem(struct drm_device *dev)
119{
120 struct nouveau_device *device = nouveau_dev(dev);
121 u32 ssel = nv_rd32(device, 0x1373f0);
122 if (ssel & 0x00000001)
123 return read_div(dev, 0, 0x137300, 0x137310);
124 return read_pll(dev, 0x132000);
125}
126
/*
 * Current frequency of engine clock @clk.  Bit @clk of 0x137100
 * selects PLL vs divider mode; the post-divider lives in 0x137250
 * (high byte in PLL mode, low byte otherwise), applied only when
 * bit 31 of that register is set.
 */
static u32
read_clk(struct drm_device *dev, int clk)
{
        struct nouveau_device *device = nouveau_dev(dev);
        u32 sctl = nv_rd32(device, 0x137250 + (clk * 4));
        u32 ssel = nv_rd32(device, 0x137100);
        u32 sclk, sdiv;

        if (ssel & (1 << clk)) {
                /* clocks >= 7 share PLL 0x1370e0 (see comment below) */
                if (clk < 7)
                        sclk = read_pll(dev, 0x137000 + (clk * 0x20));
                else
                        sclk = read_pll(dev, 0x1370e0);
                sdiv = ((sctl & 0x00003f00) >> 8) + 2;
        } else {
                sclk = read_div(dev, clk, 0x137160, 0x1371d0);
                sdiv = ((sctl & 0x0000003f) >> 0) + 2;
        }

        if (sctl & 0x80000000)
                return (sclk * 2) / sdiv;
        return sclk;
}
150
/*
 * Read the clock frequencies currently programmed in hardware into
 * @perflvl.  Core is reported as half the shader clock.  Always
 * returns 0.
 */
int
nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
        perflvl->shader = read_clk(dev, 0x00);
        perflvl->core = perflvl->shader / 2;
        perflvl->memory = read_mem(dev);
        perflvl->rop = read_clk(dev, 0x01);
        perflvl->hub07 = read_clk(dev, 0x02);
        perflvl->hub06 = read_clk(dev, 0x07);
        perflvl->hub01 = read_clk(dev, 0x08);
        perflvl->copy = read_clk(dev, 0x09);
        perflvl->daemon = read_clk(dev, 0x0c);
        perflvl->vdec = read_clk(dev, 0x0e);
        return 0;
}
166
/* Pre-computed register values for one clock domain (see prog_clk()) */
struct nvc0_pm_clock {
        u32 freq;       /* resulting frequency chosen by calc_clk() */
        u32 ssel;       /* (1 << clk) when PLL mode selected, else 0 */
        u32 mdiv;       /* final divider, written to 0x137250 */
        u32 dsrc;       /* divider source select, written to 0x137160 */
        u32 ddiv;       /* source divider, written to 0x1371d0 */
        u32 coef;       /* PLL coefficients (P << 16 | N << 8 | M) */
};
175
/* Full pre-calculated reclock state, built by nvc0_pm_clocks_pre() */
struct nvc0_pm_state {
        struct nouveau_pm_level *perflvl;       /* target level (not owned) */
        struct nvc0_pm_clock eng[16];           /* engine clocks, indexed by domain */
        struct nvc0_pm_clock mem;               /* memory clock (coef only used) */
};
181
182static u32
183calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
184{
185 u32 div = min((ref * 2) / freq, (u32)65);
186 if (div < 2)
187 div = 2;
188
189 *ddiv = div - 2;
190 return (ref * 2) / div;
191}
192
/*
 * Choose a (non-PLL) source for @freq: one of the fixed reference
 * frequencies when it matches exactly, otherwise the VCO divided as
 * closely as possible.  *dsrc/*ddiv receive the register encodings.
 * Note clocks >= 7 have no per-clock source divider, so the VCO is
 * returned undivided for them.
 */
static u32
calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
{
        u32 sclk;

        /* use one of the fixed frequencies if possible */
        *ddiv = 0x00000000;
        switch (freq) {
        case 27000:
        case 108000:
                *dsrc = 0x00000000;
                if (freq == 108000)
                        *dsrc |= 0x00030000;
                return freq;
        case 100000:
                *dsrc = 0x00000002;
                return freq;
        default:
                *dsrc = 0x00000003;
                break;
        }

        /* otherwise, calculate the closest divider */
        sclk = read_vco(dev, clk);
        if (clk < 7)
                sclk = calc_div(dev, clk, sclk, freq, ddiv);
        return sclk;
}
221
/*
 * Calculate PLL coefficients to hit @freq on engine clock @clk.
 * Limits come from the VBIOS PLL table; the reference clock is the
 * clock's divider source.  On success *coef gets the packed P/N/M
 * value and the achieved frequency is returned; 0 on any failure.
 */
static u32
calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
{
        struct nouveau_device *device = nouveau_dev(dev);
        struct nouveau_bios *bios = nouveau_bios(device);
        struct nvbios_pll limits;
        int N, M, P, ret;

        ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
        if (ret)
                return 0;

        limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
        if (!limits.refclk)
                return 0;

        ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
        if (ret <= 0)
                return 0;

        *coef = (P << 16) | (N << 8) | M;
        return ret;
}
245
246/* A (likely rather simplified and incomplete) view of the clock tree
247 *
248 * Key:
249 *
250 * S: source select
251 * D: divider
252 * P: pll
253 * F: switch
254 *
255 * Engine clocks:
256 *
257 * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
258 * (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
259 *
260 * Not all registers exist for all clocks. For example: clocks >= 8 don't
261 * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
262 * they have the divider at 1371d0, though the source selection at 137160
263 * still exists. You must use the divider at 137250 for these instead.
264 *
265 * Memory clock:
266 *
267 * TBD, read_mem() above is likely very wrong...
268 *
269 */
270
/*
 * Calculate register settings for engine clock @clk to reach @freq,
 * trying both the divider-only path and (where the clock has one) the
 * PLL path, and keeping whichever lands closer.  Results land in
 * @info; a freq of 0 marks the domain as "don't program".
 */
static int
calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
{
        u32 src0, div0, div1D, div1P = 0;
        u32 clk0, clk1 = 0;

        /* invalid clock domain */
        if (!freq)
                return 0;

        /* first possible path, using only dividers */
        clk0 = calc_src(dev, clk, freq, &src0, &div0);
        clk0 = calc_div(dev, clk, clk0, freq, &div1D);

        /* see if we can get any closer using PLLs */
        /* 0x00004387 = bitmask of clocks that have a PLL mode */
        if (clk0 != freq && (0x00004387 & (1 << clk))) {
                if (clk < 7)
                        clk1 = calc_pll(dev, clk, freq, &info->coef);
                else
                        clk1 = read_pll(dev, 0x1370e0);
                clk1 = calc_div(dev, clk, clk1, freq, &div1P);
        }

        /* select the method which gets closest to target freq */
        if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
                info->dsrc = src0;
                if (div0) {
                        info->ddiv |= 0x80000000;
                        info->ddiv |= div0 << 8;
                        info->ddiv |= div0;
                }
                if (div1D) {
                        info->mdiv |= 0x80000000;
                        info->mdiv |= div1D;
                }
                info->ssel = 0;
                info->freq = clk0;
        } else {
                if (div1P) {
                        info->mdiv |= 0x80000000;
                        info->mdiv |= div1P << 8;
                }
                info->ssel = (1 << clk);
                info->freq = clk1;
        }

        return 0;
}
319
/*
 * Calculate memory PLL coefficients for @freq, first making sure the
 * input PLL (0x132020) is running.  Only PLL mode is supported until
 * the memory clock tree is better understood.  Returns 0 on success,
 * -EINVAL if no usable coefficients could be found.
 */
static int
calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
{
        struct nouveau_device *device = nouveau_dev(dev);
        struct nouveau_bios *bios = nouveau_bios(device);
        struct nvbios_pll pll;
        int N, M, P, ret;
        u32 ctrl;

        /* mclk pll input freq comes from another pll, make sure it's on */
        ctrl = nv_rd32(device, 0x132020);
        if (!(ctrl & 0x00000001)) {
                /* if not, program it to 567MHz. nfi where this value comes
                 * from - it looks like it's in the pll limits table for
                 * 132000 but the binary driver ignores all my attempts to
                 * change this value.
                 */
                nv_wr32(device, 0x137320, 0x00000103);
                nv_wr32(device, 0x137330, 0x81200606);
                nv_wait(device, 0x132020, 0x00010000, 0x00010000);
                nv_wr32(device, 0x132024, 0x0001150f);
                nv_mask(device, 0x132020, 0x00000001, 0x00000001);
                nv_wait(device, 0x137390, 0x00020000, 0x00020000);
                nv_mask(device, 0x132020, 0x00000004, 0x00000004);
        }

        /* for the moment, until the clock tree is better understood, use
         * pll mode for all clock frequencies
         */
        ret = nvbios_pll_parse(bios, 0x132000, &pll);
        if (ret == 0) {
                pll.refclk = read_pll(dev, 0x132020);
                if (pll.refclk) {
                        ret = nva3_calc_pll(dev, &pll, freq, &N, NULL, &M, &P);
                        if (ret > 0) {
                                info->coef = (P << 16) | (N << 8) | M;
                                return 0;
                        }
                }
        }

        return -EINVAL;
}
363
/*
 * Pre-calculate everything needed to switch to @perflvl without
 * touching engine clocks.  Returns a kzalloc'd nvc0_pm_state (freed
 * by nvc0_pm_clocks_set()) or an ERR_PTR on failure.
 */
void *
nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
        struct nouveau_device *device = nouveau_dev(dev);
        struct nvc0_pm_state *info;
        int ret;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return ERR_PTR(-ENOMEM);

        /* NFI why this is still in the performance table, the ROPCs appear
         * to get their clock from clock 2 ("hub07", actually hub05 on this
         * chip, but, anyway...) as well. nvatiming confirms hub05 and ROP
         * are always the same freq with the binary driver even when the
         * performance table says they should differ.
         */
        if (device->chipset == 0xd9)
                perflvl->rop = 0;

        if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
            (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
            (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
            (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
            (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
            (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
            (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
            (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
                kfree(info);
                return ERR_PTR(ret);
        }

        /* memory is optional: perflvl->memory == 0 means leave it alone */
        if (perflvl->memory) {
                ret = calc_mem(dev, &info->mem, perflvl->memory);
                if (ret) {
                        kfree(info);
                        return ERR_PTR(ret);
                }
        }

        info->perflvl = perflvl;
        return info;
}
407
/*
 * Program engine clock @clk from a pre-calculated nvc0_pm_clock:
 * set up dividers/source, drop to non-pll mode, reprogram the PLL
 * if needed, then select the final mode and post-divider.
 */
static void
prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
{
        struct nouveau_device *device = nouveau_dev(dev);

        /* program dividers at 137160/1371d0 first */
        if (clk < 7 && !info->ssel) {
                nv_mask(device, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
                nv_wr32(device, 0x137160 + (clk * 0x04), info->dsrc);
        }

        /* switch clock to non-pll mode */
        nv_mask(device, 0x137100, (1 << clk), 0x00000000);
        nv_wait(device, 0x137100, (1 << clk), 0x00000000);

        /* reprogram pll */
        if (clk < 7) {
                /* make sure it's disabled first... */
                u32 base = 0x137000 + (clk * 0x20);
                u32 ctrl = nv_rd32(device, base + 0x00);
                if (ctrl & 0x00000001) {
                        nv_mask(device, base + 0x00, 0x00000004, 0x00000000);
                        nv_mask(device, base + 0x00, 0x00000001, 0x00000000);
                }
                /* program it to new values, if necessary */
                if (info->ssel) {
                        nv_wr32(device, base + 0x04, info->coef);
                        nv_mask(device, base + 0x00, 0x00000001, 0x00000001);
                        /* bit 17 presumably signals lock -- unverified */
                        nv_wait(device, base + 0x00, 0x00020000, 0x00020000);
                        nv_mask(device, base + 0x00, 0x00020004, 0x00000004);
                }
        }

        /* select pll/non-pll mode, and program final clock divider */
        nv_mask(device, 0x137100, (1 << clk), info->ssel);
        nv_wait(device, 0x137100, (1 << clk), info->ssel);
        nv_mask(device, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
}
446
/* mem_exec callback: intentional no-op on this chipset family --
 * NOTE(review): presumably handled by the memory controller itself */
static void
mclk_precharge(struct nouveau_mem_exec_func *exec)
{
}
451
/* mem_exec callback: intentional no-op on this chipset family */
static void
mclk_refresh(struct nouveau_mem_exec_func *exec)
{
}
456
/* mem_exec callback: enable/disable automatic refresh */
static void
mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        nv_wr32(device, 0x10f210, enable ? 0x80000000 : 0x00000000);
}
463
/* mem_exec callback: intentional no-op on this chipset family */
static void
mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
{
}
468
/* mem_exec callback: delay @nsec nanoseconds, rounded to whole usec */
static void
mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
{
        udelay((nsec + 500) / 1000);
}
474
/*
 * mem_exec callback: read memory mode register @mr.  Register layout
 * differs between GDDR5 (MR0 at 0x10f300, MR1-7 at 0x10f32c+, MR15 at
 * 0x10f34c) and other memory types (MR0/1 at 0x10f300+, MR2/3 at
 * 0x10f320+).
 */
static u32
mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        struct nouveau_fb *pfb = nouveau_fb(device);
        if (pfb->ram->type != NV_MEM_TYPE_GDDR5) {
                if (mr <= 1)
                        return nv_rd32(device, 0x10f300 + ((mr - 0) * 4));
                return nv_rd32(device, 0x10f320 + ((mr - 2) * 4));
        } else {
                if (mr == 0)
                        return nv_rd32(device, 0x10f300 + (mr * 4));
                else
                if (mr <= 7)
                        return nv_rd32(device, 0x10f32c + (mr * 4));
                return nv_rd32(device, 0x10f34c);
        }
}
493
/*
 * mem_exec callback: write memory mode register @mr (layout as in
 * mclk_mrg() above).  For non-GDDR5 with multiple ranks the shadow
 * copy at +0x08 is written as well.  Out-of-range registers are
 * silently ignored.
 */
static void
mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        struct nouveau_fb *pfb = nouveau_fb(device);
        if (pfb->ram->type != NV_MEM_TYPE_GDDR5) {
                if (mr <= 1) {
                        nv_wr32(device, 0x10f300 + ((mr - 0) * 4), data);
                        if (pfb->ram->ranks > 1)
                                nv_wr32(device, 0x10f308 + ((mr - 0) * 4), data);
                } else
                if (mr <= 3) {
                        nv_wr32(device, 0x10f320 + ((mr - 2) * 4), data);
                        if (pfb->ram->ranks > 1)
                                nv_wr32(device, 0x10f328 + ((mr - 2) * 4), data);
                }
        } else {
                if (mr == 0) nv_wr32(device, 0x10f300 + (mr * 4), data);
                else if (mr <= 7) nv_wr32(device, 0x10f32c + (mr * 4), data);
                else if (mr == 15) nv_wr32(device, 0x10f34c, data);
        }
}
516
/*
 * mem_exec callback: reprogram the memory PLL with the pre-calculated
 * coefficients.  The 0x1373x0 writes bracketing the sequence are
 * reverse-engineered and not fully understood.
 */
static void
mclk_clock_set(struct nouveau_mem_exec_func *exec)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        struct nvc0_pm_state *info = exec->priv;
        u32 ctrl = nv_rd32(device, 0x132000);

        nv_wr32(device, 0x137360, 0x00000001);
        nv_wr32(device, 0x137370, 0x00000000);
        nv_wr32(device, 0x137380, 0x00000000);
        /* disable the PLL before touching coefficients */
        if (ctrl & 0x00000001)
                nv_wr32(device, 0x132000, (ctrl &= ~0x00000001));

        nv_wr32(device, 0x132004, info->mem.coef);
        nv_wr32(device, 0x132000, (ctrl |= 0x00000001));
        nv_wait(device, 0x137390, 0x00000002, 0x00000002);
        nv_wr32(device, 0x132018, 0x00005000);

        nv_wr32(device, 0x137370, 0x00000001);
        nv_wr32(device, 0x137380, 0x00000001);
        nv_wr32(device, 0x137360, 0x00000000);
}
539
/* mem_exec callback: write the 5 memory timing registers at 0x10f290+ */
static void
mclk_timing_set(struct nouveau_mem_exec_func *exec)
{
        struct nouveau_device *device = nouveau_dev(exec->dev);
        struct nvc0_pm_state *info = exec->priv;
        struct nouveau_pm_level *perflvl = info->perflvl;
        int i;

        for (i = 0; i < 5; i++)
                nv_wr32(device, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
}
551
/*
 * Drive nouveau_mem_exec() for a memory reclock.  The 0x611200 /
 * 0x62c000 writes bracketing the sequence appear display-related
 * (NOTE(review): presumably blanking around the reclock -- unverified);
 * the register used depends on chipset generation.
 */
static void
prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
{
        struct nouveau_device *device = nouveau_dev(dev);
        struct nouveau_mem_exec_func exec = {
                .dev = dev,
                .precharge = mclk_precharge,
                .refresh = mclk_refresh,
                .refresh_auto = mclk_refresh_auto,
                .refresh_self = mclk_refresh_self,
                .wait = mclk_wait,
                .mrg = mclk_mrg,
                .mrs = mclk_mrs,
                .clock_set = mclk_clock_set,
                .timing_set = mclk_timing_set,
                .priv = info
        };

        if (device->chipset < 0xd0)
                nv_wr32(device, 0x611200, 0x00003300);
        else
                nv_wr32(device, 0x62c000, 0x03030000);

        nouveau_mem_exec(&exec, info->perflvl);

        if (device->chipset < 0xd0)
                nv_wr32(device, 0x611200, 0x00003330);
        else
                nv_wr32(device, 0x62c000, 0x03030300);
}
/*
 * Apply a state previously built by nvc0_pm_clocks_pre(): memory
 * first (if calculated), then every engine domain with a non-zero
 * target frequency.  Always consumes (kfrees) @data.  Returns 0.
 */
int
nvc0_pm_clocks_set(struct drm_device *dev, void *data)
{
        struct nvc0_pm_state *info = data;
        int i;

        if (info->mem.coef)
                prog_mem(dev, info);

        for (i = 0; i < 16; i++) {
                if (!info->eng[i].freq)
                        continue;
                prog_clk(dev, i, &info->eng[i]);
        }

        kfree(info);
        return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 835caba026d3..5e827c29d194 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -107,10 +107,17 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
107 qxl_io_log(qdev, "failed crc check for client_monitors_config," 107 qxl_io_log(qdev, "failed crc check for client_monitors_config,"
108 " retrying\n"); 108 " retrying\n");
109 } 109 }
110 drm_helper_hpd_irq_event(qdev->ddev); 110
111 if (!drm_helper_hpd_irq_event(qdev->ddev)) {
112 /* notify that the monitor configuration changed, to
113 adjust at the arbitrary resolution */
114 drm_kms_helper_hotplug_event(qdev->ddev);
115 }
111} 116}
112 117
113static int qxl_add_monitors_config_modes(struct drm_connector *connector) 118static int qxl_add_monitors_config_modes(struct drm_connector *connector,
119 unsigned *pwidth,
120 unsigned *pheight)
114{ 121{
115 struct drm_device *dev = connector->dev; 122 struct drm_device *dev = connector->dev;
116 struct qxl_device *qdev = dev->dev_private; 123 struct qxl_device *qdev = dev->dev_private;
@@ -126,11 +133,15 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector)
126 mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false, 133 mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
127 false); 134 false);
128 mode->type |= DRM_MODE_TYPE_PREFERRED; 135 mode->type |= DRM_MODE_TYPE_PREFERRED;
136 *pwidth = head->width;
137 *pheight = head->height;
129 drm_mode_probed_add(connector, mode); 138 drm_mode_probed_add(connector, mode);
130 return 1; 139 return 1;
131} 140}
132 141
133static int qxl_add_common_modes(struct drm_connector *connector) 142static int qxl_add_common_modes(struct drm_connector *connector,
143 unsigned pwidth,
144 unsigned pheight)
134{ 145{
135 struct drm_device *dev = connector->dev; 146 struct drm_device *dev = connector->dev;
136 struct drm_display_mode *mode = NULL; 147 struct drm_display_mode *mode = NULL;
@@ -159,12 +170,9 @@ static int qxl_add_common_modes(struct drm_connector *connector)
159 }; 170 };
160 171
161 for (i = 0; i < ARRAY_SIZE(common_modes); i++) { 172 for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
162 if (common_modes[i].w < 320 || common_modes[i].h < 200)
163 continue;
164
165 mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 173 mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
166 60, false, false, false); 174 60, false, false, false);
167 if (common_modes[i].w == 1024 && common_modes[i].h == 768) 175 if (common_modes[i].w == pwidth && common_modes[i].h == pheight)
168 mode->type |= DRM_MODE_TYPE_PREFERRED; 176 mode->type |= DRM_MODE_TYPE_PREFERRED;
169 drm_mode_probed_add(connector, mode); 177 drm_mode_probed_add(connector, mode);
170 } 178 }
@@ -720,16 +728,18 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
720{ 728{
721 int ret = 0; 729 int ret = 0;
722 struct qxl_device *qdev = connector->dev->dev_private; 730 struct qxl_device *qdev = connector->dev->dev_private;
731 unsigned pwidth = 1024;
732 unsigned pheight = 768;
723 733
724 DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config); 734 DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config);
725 /* TODO: what should we do here? only show the configured modes for the 735 /* TODO: what should we do here? only show the configured modes for the
726 * device, or allow the full list, or both? */ 736 * device, or allow the full list, or both? */
727 if (qdev->monitors_config && qdev->monitors_config->count) { 737 if (qdev->monitors_config && qdev->monitors_config->count) {
728 ret = qxl_add_monitors_config_modes(connector); 738 ret = qxl_add_monitors_config_modes(connector, &pwidth, &pheight);
729 if (ret < 0) 739 if (ret < 0)
730 return ret; 740 return ret;
731 } 741 }
732 ret += qxl_add_common_modes(connector); 742 ret += qxl_add_common_modes(connector, pwidth, pheight);
733 return ret; 743 return ret;
734} 744}
735 745
@@ -793,7 +803,10 @@ static enum drm_connector_status qxl_conn_detect(
793 qdev->client_monitors_config->count > output->index && 803 qdev->client_monitors_config->count > output->index &&
794 qxl_head_enabled(&qdev->client_monitors_config->heads[output->index])); 804 qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
795 805
796 DRM_DEBUG("\n"); 806 DRM_DEBUG("#%d connected: %d\n", output->index, connected);
807 if (!connected)
808 qxl_monitors_config_set(qdev, output->index, 0, 0, 0, 0, 0);
809
797 return connected ? connector_status_connected 810 return connected ? connector_status_connected
798 : connector_status_disconnected; 811 : connector_status_disconnected;
799} 812}
@@ -835,8 +848,21 @@ static const struct drm_encoder_funcs qxl_enc_funcs = {
835 .destroy = qxl_enc_destroy, 848 .destroy = qxl_enc_destroy,
836}; 849};
837 850
851static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
852{
853 if (qdev->hotplug_mode_update_property)
854 return 0;
855
856 qdev->hotplug_mode_update_property =
857 drm_property_create_range(qdev->ddev, DRM_MODE_PROP_IMMUTABLE,
858 "hotplug_mode_update", 0, 1);
859
860 return 0;
861}
862
838static int qdev_output_init(struct drm_device *dev, int num_output) 863static int qdev_output_init(struct drm_device *dev, int num_output)
839{ 864{
865 struct qxl_device *qdev = dev->dev_private;
840 struct qxl_output *qxl_output; 866 struct qxl_output *qxl_output;
841 struct drm_connector *connector; 867 struct drm_connector *connector;
842 struct drm_encoder *encoder; 868 struct drm_encoder *encoder;
@@ -863,6 +889,8 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
863 drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs); 889 drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
864 drm_connector_helper_add(connector, &qxl_connector_helper_funcs); 890 drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
865 891
892 drm_object_attach_property(&connector->base,
893 qdev->hotplug_mode_update_property, 0);
866 drm_sysfs_connector_add(connector); 894 drm_sysfs_connector_add(connector);
867 return 0; 895 return 0;
868} 896}
@@ -975,6 +1003,9 @@ int qxl_modeset_init(struct qxl_device *qdev)
975 qdev->ddev->mode_config.max_height = 8192; 1003 qdev->ddev->mode_config.max_height = 8192;
976 1004
977 qdev->ddev->mode_config.fb_base = qdev->vram_base; 1005 qdev->ddev->mode_config.fb_base = qdev->vram_base;
1006
1007 qxl_mode_create_hotplug_mode_update_property(qdev);
1008
978 for (i = 0 ; i < qxl_num_crtc; ++i) { 1009 for (i = 0 ; i < qxl_num_crtc; ++i) {
979 qdev_crtc_init(qdev->ddev, i); 1010 qdev_crtc_init(qdev->ddev, i);
980 qdev_output_init(qdev->ddev, i); 1011 qdev_output_init(qdev->ddev, i);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 41d22ed26060..7bda32f68d3b 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -323,6 +323,8 @@ struct qxl_device {
323 struct work_struct gc_work; 323 struct work_struct gc_work;
324 324
325 struct work_struct fb_work; 325 struct work_struct fb_work;
326
327 struct drm_property *hotplug_mode_update_property;
326}; 328};
327 329
328/* forward declaration for QXL_INFO_IO */ 330/* forward declaration for QXL_INFO_IO */
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 88722f233430..f437b30ce689 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -108,7 +108,7 @@ static void qxl_fb_dirty_flush(struct fb_info *info)
108 u32 x1, x2, y1, y2; 108 u32 x1, x2, y1, y2;
109 109
110 /* TODO: hard coding 32 bpp */ 110 /* TODO: hard coding 32 bpp */
111 int stride = qfbdev->qfb.base.pitches[0] * 4; 111 int stride = qfbdev->qfb.base.pitches[0];
112 112
113 x1 = qfbdev->dirty.x1; 113 x1 = qfbdev->dirty.x1;
114 x2 = qfbdev->dirty.x2; 114 x2 = qfbdev->dirty.x2;
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 9e8da9ee9731..e5ca498be920 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -120,7 +120,7 @@ int qxl_device_init(struct qxl_device *qdev,
120 struct pci_dev *pdev, 120 struct pci_dev *pdev,
121 unsigned long flags) 121 unsigned long flags)
122{ 122{
123 int r; 123 int r, sb;
124 124
125 qdev->dev = &pdev->dev; 125 qdev->dev = &pdev->dev;
126 qdev->ddev = ddev; 126 qdev->ddev = ddev;
@@ -136,21 +136,39 @@ int qxl_device_init(struct qxl_device *qdev,
136 qdev->rom_base = pci_resource_start(pdev, 2); 136 qdev->rom_base = pci_resource_start(pdev, 2);
137 qdev->rom_size = pci_resource_len(pdev, 2); 137 qdev->rom_size = pci_resource_len(pdev, 2);
138 qdev->vram_base = pci_resource_start(pdev, 0); 138 qdev->vram_base = pci_resource_start(pdev, 0);
139 qdev->surfaceram_base = pci_resource_start(pdev, 1);
140 qdev->surfaceram_size = pci_resource_len(pdev, 1);
141 qdev->io_base = pci_resource_start(pdev, 3); 139 qdev->io_base = pci_resource_start(pdev, 3);
142 140
143 qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0)); 141 qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
144 qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size); 142
145 DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk)\n", 143 if (pci_resource_len(pdev, 4) > 0) {
144 /* 64bit surface bar present */
145 sb = 4;
146 qdev->surfaceram_base = pci_resource_start(pdev, sb);
147 qdev->surfaceram_size = pci_resource_len(pdev, sb);
148 qdev->surface_mapping =
149 io_mapping_create_wc(qdev->surfaceram_base,
150 qdev->surfaceram_size);
151 }
152 if (qdev->surface_mapping == NULL) {
153 /* 64bit surface bar not present (or mapping failed) */
154 sb = 1;
155 qdev->surfaceram_base = pci_resource_start(pdev, sb);
156 qdev->surfaceram_size = pci_resource_len(pdev, sb);
157 qdev->surface_mapping =
158 io_mapping_create_wc(qdev->surfaceram_base,
159 qdev->surfaceram_size);
160 }
161
162 DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
146 (unsigned long long)qdev->vram_base, 163 (unsigned long long)qdev->vram_base,
147 (unsigned long long)pci_resource_end(pdev, 0), 164 (unsigned long long)pci_resource_end(pdev, 0),
148 (int)pci_resource_len(pdev, 0) / 1024 / 1024, 165 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
149 (int)pci_resource_len(pdev, 0) / 1024, 166 (int)pci_resource_len(pdev, 0) / 1024,
150 (unsigned long long)qdev->surfaceram_base, 167 (unsigned long long)qdev->surfaceram_base,
151 (unsigned long long)pci_resource_end(pdev, 1), 168 (unsigned long long)pci_resource_end(pdev, sb),
152 (int)qdev->surfaceram_size / 1024 / 1024, 169 (int)qdev->surfaceram_size / 1024 / 1024,
153 (int)qdev->surfaceram_size / 1024); 170 (int)qdev->surfaceram_size / 1024,
171 (sb == 4) ? "64bit" : "32bit");
154 172
155 qdev->rom = ioremap(qdev->rom_base, qdev->rom_size); 173 qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
156 if (!qdev->rom) { 174 if (!qdev->rom) {
@@ -230,9 +248,13 @@ int qxl_device_init(struct qxl_device *qdev,
230 qdev->surfaces_mem_slot = setup_slot(qdev, 1, 248 qdev->surfaces_mem_slot = setup_slot(qdev, 1,
231 (unsigned long)qdev->surfaceram_base, 249 (unsigned long)qdev->surfaceram_base,
232 (unsigned long)qdev->surfaceram_base + qdev->surfaceram_size); 250 (unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
233 DRM_INFO("main mem slot %d [%lx,%x)\n", 251 DRM_INFO("main mem slot %d [%lx,%x]\n",
234 qdev->main_mem_slot, 252 qdev->main_mem_slot,
235 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset); 253 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
254 DRM_INFO("surface mem slot %d [%lx,%lx]\n",
255 qdev->surfaces_mem_slot,
256 (unsigned long)qdev->surfaceram_base,
257 (unsigned long)qdev->surfaceram_size);
236 258
237 259
238 qdev->gc_queue = create_singlethread_workqueue("qxl_gc"); 260 qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 037786d7c1dc..c7e7e6590c2b 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -516,6 +516,8 @@ int qxl_ttm_init(struct qxl_device *qdev)
516 (unsigned)qdev->vram_size / (1024 * 1024)); 516 (unsigned)qdev->vram_size / (1024 * 1024));
517 DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n", 517 DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
518 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024)); 518 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
519 DRM_INFO("qxl: %uM of Surface memory size\n",
520 (unsigned)qdev->surfaceram_size / (1024 * 1024));
519 if (unlikely(qdev->mman.bdev.dev_mapping == NULL)) 521 if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
520 qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping; 522 qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
521 r = qxl_ttm_debugfs_init(qdev); 523 r = qxl_ttm_debugfs_init(qdev);
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index af10f8571d87..92be50c39ffd 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1711,7 +1711,9 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V6
1711#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c 1711#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c
1712#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00 1712#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00
1713#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04 1713#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04
1714#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6 0x08 //for V6, the correct defintion for 36bpp should be 2 for 36bpp(2:1)
1714#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08 1715#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
1716#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6 0x04 //for V6, the correct defintion for 30bpp should be 1 for 36bpp(5:4)
1715#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c 1717#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
1716#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10 1718#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
1717#define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK 0x40 1719#define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK 0x40
@@ -2223,7 +2225,7 @@ typedef struct _SET_VOLTAGE_PARAMETERS_V2
2223 USHORT usVoltageLevel; // real voltage level 2225 USHORT usVoltageLevel; // real voltage level
2224}SET_VOLTAGE_PARAMETERS_V2; 2226}SET_VOLTAGE_PARAMETERS_V2;
2225 2227
2226 2228// used by both SetVoltageTable v1.3 and v1.4
2227typedef struct _SET_VOLTAGE_PARAMETERS_V1_3 2229typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
2228{ 2230{
2229 UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI 2231 UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
@@ -2290,15 +2292,36 @@ typedef struct _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
2290#define ATOM_GET_VOLTAGE_VID 0x00 2292#define ATOM_GET_VOLTAGE_VID 0x00
2291#define ATOM_GET_VOTLAGE_INIT_SEQ 0x03 2293#define ATOM_GET_VOTLAGE_INIT_SEQ 0x03
2292#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID 0x04 2294#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID 0x04
2293// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state 2295#define ATOM_GET_VOLTAGE_SVID2 0x07 //Get SVI2 Regulator Info
2294#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
2295 2296
2297// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state
2298#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
2296// for SI, this state map to 0xff01 voltage state in Power Play table, which is performance state 2299// for SI, this state map to 0xff01 voltage state in Power Play table, which is performance state
2297#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11 2300#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
2298// undefined power state 2301
2299#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12 2302#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
2300#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13 2303#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
2301 2304
2305// New Added from CI Hawaii for GetVoltageInfoTable, input parameter structure
2306typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2
2307{
2308 UCHAR ucVoltageType; // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
2309 UCHAR ucVoltageMode; // Input: Indicate action: Get voltage info
2310 USHORT usVoltageLevel; // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
2311 ULONG ulSCLKFreq; // Input: when ucVoltageMode= ATOM_GET_VOLTAGE_EVV_VOLTAGE, DPM state SCLK frequency, Define in PPTable SCLK/Voltage dependence table
2312}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2;
2313
2314// New in GetVoltageInfo v1.2 ucVoltageMode
2315#define ATOM_GET_VOLTAGE_EVV_VOLTAGE 0x09
2316
2317// New Added from CI Hawaii for EVV feature
2318typedef struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2
2319{
2320 USHORT usVoltageLevel; // real voltage level in unit of mv
2321 USHORT usVoltageId; // Voltage Id programmed in Voltage Regulator
2322 ULONG ulReseved;
2323}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2;
2324
2302/****************************************************************************/ 2325/****************************************************************************/
2303// Structures used by TVEncoderControlTable 2326// Structures used by TVEncoderControlTable
2304/****************************************************************************/ 2327/****************************************************************************/
@@ -3864,6 +3887,8 @@ typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
3864#define PP_AC_DC_SWITCH_GPIO_PINID 60 3887#define PP_AC_DC_SWITCH_GPIO_PINID 60
3865//from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enable 3888//from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enable
3866#define VDDC_VRHOT_GPIO_PINID 61 3889#define VDDC_VRHOT_GPIO_PINID 61
3890//if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, Peak Current Control feature is enabled
3891#define VDDC_PCC_GPIO_PINID 62
3867 3892
3868typedef struct _ATOM_GPIO_PIN_LUT 3893typedef struct _ATOM_GPIO_PIN_LUT
3869{ 3894{
@@ -4169,10 +4194,10 @@ typedef struct _ATOM_COMMON_RECORD_HEADER
4169#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicats the oobject is linked to another obj described by the record 4194#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicats the oobject is linked to another obj described by the record
4170#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19 4195#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
4171#define ATOM_ENCODER_CAP_RECORD_TYPE 20 4196#define ATOM_ENCODER_CAP_RECORD_TYPE 20
4172 4197#define ATOM_BRACKET_LAYOUT_RECORD_TYPE 21
4173 4198
4174//Must be updated when new record type is added,equal to that record definition! 4199//Must be updated when new record type is added,equal to that record definition!
4175#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE 4200#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_BRACKET_LAYOUT_RECORD_TYPE
4176 4201
4177typedef struct _ATOM_I2C_RECORD 4202typedef struct _ATOM_I2C_RECORD
4178{ 4203{
@@ -4397,6 +4422,31 @@ typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
4397 USHORT usReserved; 4422 USHORT usReserved;
4398}ATOM_CONNECTOR_REMOTE_CAP_RECORD; 4423}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
4399 4424
4425typedef struct _ATOM_CONNECTOR_LAYOUT_INFO
4426{
4427 USHORT usConnectorObjectId;
4428 UCHAR ucConnectorType;
4429 UCHAR ucPosition;
4430}ATOM_CONNECTOR_LAYOUT_INFO;
4431
4432// define ATOM_CONNECTOR_LAYOUT_INFO.ucConnectorType to describe the display connector size
4433#define CONNECTOR_TYPE_DVI_D 1
4434#define CONNECTOR_TYPE_DVI_I 2
4435#define CONNECTOR_TYPE_VGA 3
4436#define CONNECTOR_TYPE_HDMI 4
4437#define CONNECTOR_TYPE_DISPLAY_PORT 5
4438#define CONNECTOR_TYPE_MINI_DISPLAY_PORT 6
4439
4440typedef struct _ATOM_BRACKET_LAYOUT_RECORD
4441{
4442 ATOM_COMMON_RECORD_HEADER sheader;
4443 UCHAR ucLength;
4444 UCHAR ucWidth;
4445 UCHAR ucConnNum;
4446 UCHAR ucReserved;
4447 ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[1];
4448}ATOM_BRACKET_LAYOUT_RECORD;
4449
4400/****************************************************************************/ 4450/****************************************************************************/
4401// ASIC voltage data table 4451// ASIC voltage data table
4402/****************************************************************************/ 4452/****************************************************************************/
@@ -4524,8 +4574,9 @@ typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
4524#define VOLTAGE_OBJ_VR_I2C_INIT_SEQ 3 //VOLTAGE REGULATOR INIT sequece through I2C -> ATOM_I2C_VOLTAGE_OBJECT_V3 4574#define VOLTAGE_OBJ_VR_I2C_INIT_SEQ 3 //VOLTAGE REGULATOR INIT sequece through I2C -> ATOM_I2C_VOLTAGE_OBJECT_V3
4525#define VOLTAGE_OBJ_PHASE_LUT 4 //Set Vregulator Phase lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3 4575#define VOLTAGE_OBJ_PHASE_LUT 4 //Set Vregulator Phase lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
4526#define VOLTAGE_OBJ_SVID2 7 //Indicate voltage control by SVID2 ->ATOM_SVID2_VOLTAGE_OBJECT_V3 4576#define VOLTAGE_OBJ_SVID2 7 //Indicate voltage control by SVID2 ->ATOM_SVID2_VOLTAGE_OBJECT_V3
4527#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT 0x10 //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 4577#define VOLTAGE_OBJ_EVV 8
4528#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT 0x11 //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 4578#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT 0x10 //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
4579#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT 0x11 //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
4529#define VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT 0x12 //High1 voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 4580#define VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT 0x12 //High1 voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
4530 4581
4531typedef struct _VOLTAGE_LUT_ENTRY_V2 4582typedef struct _VOLTAGE_LUT_ENTRY_V2
@@ -4552,6 +4603,10 @@ typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
4552 VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff 4603 VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff
4553}ATOM_I2C_VOLTAGE_OBJECT_V3; 4604}ATOM_I2C_VOLTAGE_OBJECT_V3;
4554 4605
4606// ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
4607#define VOLTAGE_DATA_ONE_BYTE 0
4608#define VOLTAGE_DATA_TWO_BYTE 1
4609
4555typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3 4610typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
4556{ 4611{
4557 ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT 4612 ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
@@ -4584,7 +4639,8 @@ typedef struct _ATOM_SVID2_VOLTAGE_OBJECT_V3
4584// 1:0 – offset trim, 4639// 1:0 – offset trim,
4585 USHORT usLoadLine_PSI; 4640 USHORT usLoadLine_PSI;
4586// GPU GPIO pin Id to SVID2 regulator VRHot pin. possible value 0~31. 0 means GPIO0, 31 means GPIO31 4641// GPU GPIO pin Id to SVID2 regulator VRHot pin. possible value 0~31. 0 means GPIO0, 31 means GPIO31
4587 UCHAR ucReserved[2]; 4642 UCHAR ucSVDGpioId; //0~31 indicate GPIO0~31
4643 UCHAR ucSVCGpioId; //0~31 indicate GPIO0~31
4588 ULONG ulReserved; 4644 ULONG ulReserved;
4589}ATOM_SVID2_VOLTAGE_OBJECT_V3; 4645}ATOM_SVID2_VOLTAGE_OBJECT_V3;
4590 4646
@@ -4637,6 +4693,49 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V2_1
4637 USHORT usElbVDDCI_LevelArrayOffset; // offset of 2 dimension voltage level USHORT array 4693 USHORT usElbVDDCI_LevelArrayOffset; // offset of 2 dimension voltage level USHORT array
4638}ATOM_ASIC_PROFILING_INFO_V2_1; 4694}ATOM_ASIC_PROFILING_INFO_V2_1;
4639 4695
4696typedef struct _ATOM_ASIC_PROFILING_INFO_V3_1
4697{
4698 ATOM_COMMON_TABLE_HEADER asHeader;
4699 ULONG ulEvvDerateTdp;
4700 ULONG ulEvvDerateTdc;
4701 ULONG ulBoardCoreTemp;
4702 ULONG ulMaxVddc;
4703 ULONG ulMinVddc;
4704 ULONG ulLoadLineSlop;
4705 ULONG ulLeakageTemp;
4706 ULONG ulLeakageVoltage;
4707 ULONG ulCACmEncodeRange;
4708 ULONG ulCACmEncodeAverage;
4709 ULONG ulCACbEncodeRange;
4710 ULONG ulCACbEncodeAverage;
4711 ULONG ulKt_bEncodeRange;
4712 ULONG ulKt_bEncodeAverage;
4713 ULONG ulKv_mEncodeRange;
4714 ULONG ulKv_mEncodeAverage;
4715 ULONG ulKv_bEncodeRange;
4716 ULONG ulKv_bEncodeAverage;
4717 ULONG ulLkgEncodeLn_MaxDivMin;
4718 ULONG ulLkgEncodeMin;
4719 ULONG ulEfuseLogisticAlpha;
4720 USHORT usPowerDpm0;
4721 USHORT usCurrentDpm0;
4722 USHORT usPowerDpm1;
4723 USHORT usCurrentDpm1;
4724 USHORT usPowerDpm2;
4725 USHORT usCurrentDpm2;
4726 USHORT usPowerDpm3;
4727 USHORT usCurrentDpm3;
4728 USHORT usPowerDpm4;
4729 USHORT usCurrentDpm4;
4730 USHORT usPowerDpm5;
4731 USHORT usCurrentDpm5;
4732 USHORT usPowerDpm6;
4733 USHORT usCurrentDpm6;
4734 USHORT usPowerDpm7;
4735 USHORT usCurrentDpm7;
4736}ATOM_ASIC_PROFILING_INFO_V3_1;
4737
4738
4640typedef struct _ATOM_POWER_SOURCE_OBJECT 4739typedef struct _ATOM_POWER_SOURCE_OBJECT
4641{ 4740{
4642 UCHAR ucPwrSrcId; // Power source 4741 UCHAR ucPwrSrcId; // Power source
@@ -5808,6 +5907,8 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
5808#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0 0x0C 5907#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0 0x0C
5809#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0 0xF0 5908#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0 0xF0
5810#define ATOM_S7_DOS_8BIT_DAC_ENb1 0x01 5909#define ATOM_S7_DOS_8BIT_DAC_ENb1 0x01
5910#define ATOM_S7_ASIC_INIT_COMPLETEb1 0x02
5911#define ATOM_S7_ASIC_INIT_COMPLETE_MASK 0x00000200
5811#define ATOM_S7_DOS_MODE_NUMBERw1 0x0FFFF 5912#define ATOM_S7_DOS_MODE_NUMBERw1 0x0FFFF
5812 5913
5813#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8 5914#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
@@ -6242,6 +6343,7 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
6242#define _128Mx32 0x53 6343#define _128Mx32 0x53
6243#define _256Mx8 0x61 6344#define _256Mx8 0x61
6244#define _256Mx16 0x62 6345#define _256Mx16 0x62
6346#define _512Mx8 0x71
6245 6347
6246#define SAMSUNG 0x1 6348#define SAMSUNG 0x1
6247#define INFINEON 0x2 6349#define INFINEON 0x2
@@ -6987,9 +7089,10 @@ typedef struct _ATOM_DISP_OUT_INFO_V3
6987 UCHAR ucMaxDispEngineNum; 7089 UCHAR ucMaxDispEngineNum;
6988 UCHAR ucMaxActiveDispEngineNum; 7090 UCHAR ucMaxActiveDispEngineNum;
6989 UCHAR ucMaxPPLLNum; 7091 UCHAR ucMaxPPLLNum;
6990 UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE 7092 UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
6991 UCHAR ucReserved[3]; 7093 UCHAR ucDispCaps;
6992 ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alligment only 7094 UCHAR ucReserved[2];
7095 ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alligment only
6993}ATOM_DISP_OUT_INFO_V3; 7096}ATOM_DISP_OUT_INFO_V3;
6994 7097
6995//ucDispCaps 7098//ucDispCaps
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index bf87f6d435f8..80a20120e625 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1753,7 +1753,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1753 if (pll != ATOM_PPLL_INVALID) 1753 if (pll != ATOM_PPLL_INVALID)
1754 return pll; 1754 return pll;
1755 } 1755 }
1756 } else { 1756 } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
1757 /* use the same PPLL for all monitors with the same clock */ 1757 /* use the same PPLL for all monitors with the same clock */
1758 pll = radeon_get_shared_nondp_ppll(crtc); 1758 pll = radeon_get_shared_nondp_ppll(crtc);
1759 if (pll != ATOM_PPLL_INVALID) 1759 if (pll != ATOM_PPLL_INVALID)
@@ -1910,6 +1910,21 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1910 int i; 1910 int i;
1911 1911
1912 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1912 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1913 if (crtc->fb) {
1914 int r;
1915 struct radeon_framebuffer *radeon_fb;
1916 struct radeon_bo *rbo;
1917
1918 radeon_fb = to_radeon_framebuffer(crtc->fb);
1919 rbo = gem_to_radeon_bo(radeon_fb->obj);
1920 r = radeon_bo_reserve(rbo, false);
1921 if (unlikely(r))
1922 DRM_ERROR("failed to reserve rbo before unpin\n");
1923 else {
1924 radeon_bo_unpin(rbo);
1925 radeon_bo_unreserve(rbo);
1926 }
1927 }
1913 /* disable the GRPH */ 1928 /* disable the GRPH */
1914 if (ASIC_IS_DCE4(rdev)) 1929 if (ASIC_IS_DCE4(rdev))
1915 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0); 1930 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
@@ -1940,7 +1955,9 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1940 break; 1955 break;
1941 case ATOM_PPLL0: 1956 case ATOM_PPLL0:
1942 /* disable the ppll */ 1957 /* disable the ppll */
1943 if ((rdev->family == CHIP_ARUBA) || (rdev->family == CHIP_BONAIRE)) 1958 if ((rdev->family == CHIP_ARUBA) ||
1959 (rdev->family == CHIP_BONAIRE) ||
1960 (rdev->family == CHIP_HAWAII))
1944 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 1961 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
1945 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 1962 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
1946 break; 1963 break;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 5e891b226acf..a42d61571f49 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -213,7 +213,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
213 props.type = BACKLIGHT_RAW; 213 props.type = BACKLIGHT_RAW;
214 snprintf(bl_name, sizeof(bl_name), 214 snprintf(bl_name, sizeof(bl_name),
215 "radeon_bl%d", dev->primary->index); 215 "radeon_bl%d", dev->primary->index);
216 bd = backlight_device_register(bl_name, &drm_connector->kdev, 216 bd = backlight_device_register(bl_name, drm_connector->kdev,
217 pdata, &radeon_atom_backlight_ops, &props); 217 pdata, &radeon_atom_backlight_ops, &props);
218 if (IS_ERR(bd)) { 218 if (IS_ERR(bd)) {
219 DRM_ERROR("Backlight registration failed\n"); 219 DRM_ERROR("Backlight registration failed\n");
@@ -1662,19 +1662,11 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1662 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); 1662 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1663 /* enable the transmitter */ 1663 /* enable the transmitter */
1664 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1664 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1665 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1666 } else { 1665 } else {
1667 /* setup and enable the encoder and transmitter */ 1666 /* setup and enable the encoder and transmitter */
1668 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); 1667 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
1669 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); 1668 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1670 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1669 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1671 /* some dce3.x boards have a bug in their transmitter control table.
1672 * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
1673 * does the same thing and more.
1674 */
1675 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
1676 (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
1677 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1678 } 1670 }
1679 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { 1671 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
1680 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1672 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -1692,16 +1684,11 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1692 case DRM_MODE_DPMS_STANDBY: 1684 case DRM_MODE_DPMS_STANDBY:
1693 case DRM_MODE_DPMS_SUSPEND: 1685 case DRM_MODE_DPMS_SUSPEND:
1694 case DRM_MODE_DPMS_OFF: 1686 case DRM_MODE_DPMS_OFF:
1695 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { 1687 if (ASIC_IS_DCE4(rdev)) {
1696 /* disable the transmitter */
1697 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1698 } else if (ASIC_IS_DCE4(rdev)) {
1699 /* disable the transmitter */ 1688 /* disable the transmitter */
1700 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1701 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 1689 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1702 } else { 1690 } else {
1703 /* disable the encoder and transmitter */ 1691 /* disable the encoder and transmitter */
1704 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1705 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 1692 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1706 atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); 1693 atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
1707 } 1694 }
@@ -2410,6 +2397,15 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
2410 2397
2411 /* this is needed for the pll/ss setup to work correctly in some cases */ 2398 /* this is needed for the pll/ss setup to work correctly in some cases */
2412 atombios_set_encoder_crtc_source(encoder); 2399 atombios_set_encoder_crtc_source(encoder);
2400 /* set up the FMT blocks */
2401 if (ASIC_IS_DCE8(rdev))
2402 dce8_program_fmt(encoder);
2403 else if (ASIC_IS_DCE4(rdev))
2404 dce4_program_fmt(encoder);
2405 else if (ASIC_IS_DCE3(rdev))
2406 dce3_program_fmt(encoder);
2407 else if (ASIC_IS_AVIVO(rdev))
2408 avivo_program_fmt(encoder);
2413} 2409}
2414 2410
2415static void radeon_atom_encoder_commit(struct drm_encoder *encoder) 2411static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 51e947a97edf..1ed479976358 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -40,6 +40,20 @@
40#define VOLTAGE_VID_OFFSET_SCALE1 625 40#define VOLTAGE_VID_OFFSET_SCALE1 625
41#define VOLTAGE_VID_OFFSET_SCALE2 100 41#define VOLTAGE_VID_OFFSET_SCALE2 100
42 42
43static const struct ci_pt_defaults defaults_hawaii_xt =
44{
45 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
46 { 0x84, 0x0, 0x0, 0x7F, 0x0, 0x0, 0x5A, 0x60, 0x51, 0x8E, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
47 { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
48};
49
50static const struct ci_pt_defaults defaults_hawaii_pro =
51{
52 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
53 { 0x93, 0x0, 0x0, 0x97, 0x0, 0x0, 0x6B, 0x60, 0x51, 0x95, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
54 { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
55};
56
43static const struct ci_pt_defaults defaults_bonaire_xt = 57static const struct ci_pt_defaults defaults_bonaire_xt =
44{ 58{
45 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, 59 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
@@ -187,22 +201,38 @@ static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
187 struct ci_power_info *pi = ci_get_pi(rdev); 201 struct ci_power_info *pi = ci_get_pi(rdev);
188 202
189 switch (rdev->pdev->device) { 203 switch (rdev->pdev->device) {
190 case 0x6650: 204 case 0x6650:
191 case 0x6658: 205 case 0x6658:
192 case 0x665C: 206 case 0x665C:
193 default: 207 default:
194 pi->powertune_defaults = &defaults_bonaire_xt; 208 pi->powertune_defaults = &defaults_bonaire_xt;
195 break; 209 break;
196 case 0x6651: 210 case 0x6651:
197 case 0x665D: 211 case 0x665D:
198 pi->powertune_defaults = &defaults_bonaire_pro; 212 pi->powertune_defaults = &defaults_bonaire_pro;
199 break; 213 break;
200 case 0x6640: 214 case 0x6640:
201 pi->powertune_defaults = &defaults_saturn_xt; 215 pi->powertune_defaults = &defaults_saturn_xt;
202 break; 216 break;
203 case 0x6641: 217 case 0x6641:
204 pi->powertune_defaults = &defaults_saturn_pro; 218 pi->powertune_defaults = &defaults_saturn_pro;
205 break; 219 break;
220 case 0x67B8:
221 case 0x67B0:
222 case 0x67A0:
223 case 0x67A1:
224 case 0x67A2:
225 case 0x67A8:
226 case 0x67A9:
227 case 0x67AA:
228 case 0x67B9:
229 case 0x67BE:
230 pi->powertune_defaults = &defaults_hawaii_xt;
231 break;
232 case 0x67BA:
233 case 0x67B1:
234 pi->powertune_defaults = &defaults_hawaii_pro;
235 break;
206 } 236 }
207 237
208 pi->dte_tj_offset = 0; 238 pi->dte_tj_offset = 0;
@@ -5142,9 +5172,15 @@ int ci_dpm_init(struct radeon_device *rdev)
5142 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0; 5172 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5143 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; 5173 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5144 5174
5145 pi->thermal_temp_setting.temperature_low = 99500; 5175 if (rdev->family == CHIP_HAWAII) {
5146 pi->thermal_temp_setting.temperature_high = 100000; 5176 pi->thermal_temp_setting.temperature_low = 94500;
5147 pi->thermal_temp_setting.temperature_shutdown = 104000; 5177 pi->thermal_temp_setting.temperature_high = 95000;
5178 pi->thermal_temp_setting.temperature_shutdown = 104000;
5179 } else {
5180 pi->thermal_temp_setting.temperature_low = 99500;
5181 pi->thermal_temp_setting.temperature_high = 100000;
5182 pi->thermal_temp_setting.temperature_shutdown = 104000;
5183 }
5148 5184
5149 pi->uvd_enabled = false; 5185 pi->uvd_enabled = false;
5150 5186
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 252e10a41cf5..9c745dd22438 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -217,6 +217,10 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
217 ucode_start_address = BONAIRE_SMC_UCODE_START; 217 ucode_start_address = BONAIRE_SMC_UCODE_START;
218 ucode_size = BONAIRE_SMC_UCODE_SIZE; 218 ucode_size = BONAIRE_SMC_UCODE_SIZE;
219 break; 219 break;
220 case CHIP_HAWAII:
221 ucode_start_address = HAWAII_SMC_UCODE_START;
222 ucode_size = HAWAII_SMC_UCODE_SIZE;
223 break;
220 default: 224 default:
221 DRM_ERROR("unknown asic in smc ucode loader\n"); 225 DRM_ERROR("unknown asic in smc ucode loader\n");
222 BUG(); 226 BUG();
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 9cd2bc989ac7..ae92aa041c6a 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -41,6 +41,14 @@ MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
41MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin"); 41MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
42MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin"); 42MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
43MODULE_FIRMWARE("radeon/BONAIRE_smc.bin"); 43MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
44MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
45MODULE_FIRMWARE("radeon/HAWAII_me.bin");
46MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
47MODULE_FIRMWARE("radeon/HAWAII_mec.bin");
48MODULE_FIRMWARE("radeon/HAWAII_mc.bin");
49MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
50MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
51MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
44MODULE_FIRMWARE("radeon/KAVERI_pfp.bin"); 52MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
45MODULE_FIRMWARE("radeon/KAVERI_me.bin"); 53MODULE_FIRMWARE("radeon/KAVERI_me.bin");
46MODULE_FIRMWARE("radeon/KAVERI_ce.bin"); 54MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
@@ -67,11 +75,6 @@ extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
67extern int cik_sdma_resume(struct radeon_device *rdev); 75extern int cik_sdma_resume(struct radeon_device *rdev);
68extern void cik_sdma_enable(struct radeon_device *rdev, bool enable); 76extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
69extern void cik_sdma_fini(struct radeon_device *rdev); 77extern void cik_sdma_fini(struct radeon_device *rdev);
70extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
71 struct radeon_ib *ib,
72 uint64_t pe,
73 uint64_t addr, unsigned count,
74 uint32_t incr, uint32_t flags);
75static void cik_rlc_stop(struct radeon_device *rdev); 78static void cik_rlc_stop(struct radeon_device *rdev);
76static void cik_pcie_gen3_enable(struct radeon_device *rdev); 79static void cik_pcie_gen3_enable(struct radeon_device *rdev);
77static void cik_program_aspm(struct radeon_device *rdev); 80static void cik_program_aspm(struct radeon_device *rdev);
@@ -1302,6 +1305,171 @@ static const u32 kalindi_mgcg_cgcg_init[] =
1302 0xd80c, 0xff000ff0, 0x00000100 1305 0xd80c, 0xff000ff0, 0x00000100
1303}; 1306};
1304 1307
1308static const u32 hawaii_golden_spm_registers[] =
1309{
1310 0x30800, 0xe0ffffff, 0xe0000000
1311};
1312
1313static const u32 hawaii_golden_common_registers[] =
1314{
1315 0x30800, 0xffffffff, 0xe0000000,
1316 0x28350, 0xffffffff, 0x3a00161a,
1317 0x28354, 0xffffffff, 0x0000002e,
1318 0x9a10, 0xffffffff, 0x00018208,
1319 0x98f8, 0xffffffff, 0x12011003
1320};
1321
1322static const u32 hawaii_golden_registers[] =
1323{
1324 0x3354, 0x00000333, 0x00000333,
1325 0x9a10, 0x00010000, 0x00058208,
1326 0x9830, 0xffffffff, 0x00000000,
1327 0x9834, 0xf00fffff, 0x00000400,
1328 0x9838, 0x0002021c, 0x00020200,
1329 0xc78, 0x00000080, 0x00000000,
1330 0x5bb0, 0x000000f0, 0x00000070,
1331 0x5bc0, 0xf0311fff, 0x80300000,
1332 0x350c, 0x00810000, 0x408af000,
1333 0x7030, 0x31000111, 0x00000011,
1334 0x2f48, 0x73773777, 0x12010001,
1335 0x2120, 0x0000007f, 0x0000001b,
1336 0x21dc, 0x00007fb6, 0x00002191,
1337 0x3628, 0x0000003f, 0x0000000a,
1338 0x362c, 0x0000003f, 0x0000000a,
1339 0x2ae4, 0x00073ffe, 0x000022a2,
1340 0x240c, 0x000007ff, 0x00000000,
1341 0x8bf0, 0x00002001, 0x00000001,
1342 0x8b24, 0xffffffff, 0x00ffffff,
1343 0x30a04, 0x0000ff0f, 0x00000000,
1344 0x28a4c, 0x07ffffff, 0x06000000,
1345 0x3e78, 0x00000001, 0x00000002,
1346 0xc768, 0x00000008, 0x00000008,
1347 0xc770, 0x00000f00, 0x00000800,
1348 0xc774, 0x00000f00, 0x00000800,
1349 0xc798, 0x00ffffff, 0x00ff7fbf,
1350 0xc79c, 0x00ffffff, 0x00ff7faf,
1351 0x8c00, 0x000000ff, 0x00000800,
1352 0xe40, 0x00001fff, 0x00001fff,
1353 0x9060, 0x0000007f, 0x00000020,
1354 0x9508, 0x00010000, 0x00010000,
1355 0xae00, 0x00100000, 0x000ff07c,
1356 0xac14, 0x000003ff, 0x0000000f,
1357 0xac10, 0xffffffff, 0x7564fdec,
1358 0xac0c, 0xffffffff, 0x3120b9a8,
1359 0xac08, 0x20000000, 0x0f9c0000
1360};
1361
1362static const u32 hawaii_mgcg_cgcg_init[] =
1363{
1364 0xc420, 0xffffffff, 0xfffffffd,
1365 0x30800, 0xffffffff, 0xe0000000,
1366 0x3c2a0, 0xffffffff, 0x00000100,
1367 0x3c208, 0xffffffff, 0x00000100,
1368 0x3c2c0, 0xffffffff, 0x00000100,
1369 0x3c2c8, 0xffffffff, 0x00000100,
1370 0x3c2c4, 0xffffffff, 0x00000100,
1371 0x55e4, 0xffffffff, 0x00200100,
1372 0x3c280, 0xffffffff, 0x00000100,
1373 0x3c214, 0xffffffff, 0x06000100,
1374 0x3c220, 0xffffffff, 0x00000100,
1375 0x3c218, 0xffffffff, 0x06000100,
1376 0x3c204, 0xffffffff, 0x00000100,
1377 0x3c2e0, 0xffffffff, 0x00000100,
1378 0x3c224, 0xffffffff, 0x00000100,
1379 0x3c200, 0xffffffff, 0x00000100,
1380 0x3c230, 0xffffffff, 0x00000100,
1381 0x3c234, 0xffffffff, 0x00000100,
1382 0x3c250, 0xffffffff, 0x00000100,
1383 0x3c254, 0xffffffff, 0x00000100,
1384 0x3c258, 0xffffffff, 0x00000100,
1385 0x3c25c, 0xffffffff, 0x00000100,
1386 0x3c260, 0xffffffff, 0x00000100,
1387 0x3c27c, 0xffffffff, 0x00000100,
1388 0x3c278, 0xffffffff, 0x00000100,
1389 0x3c210, 0xffffffff, 0x06000100,
1390 0x3c290, 0xffffffff, 0x00000100,
1391 0x3c274, 0xffffffff, 0x00000100,
1392 0x3c2b4, 0xffffffff, 0x00000100,
1393 0x3c2b0, 0xffffffff, 0x00000100,
1394 0x3c270, 0xffffffff, 0x00000100,
1395 0x30800, 0xffffffff, 0xe0000000,
1396 0x3c020, 0xffffffff, 0x00010000,
1397 0x3c024, 0xffffffff, 0x00030002,
1398 0x3c028, 0xffffffff, 0x00040007,
1399 0x3c02c, 0xffffffff, 0x00060005,
1400 0x3c030, 0xffffffff, 0x00090008,
1401 0x3c034, 0xffffffff, 0x00010000,
1402 0x3c038, 0xffffffff, 0x00030002,
1403 0x3c03c, 0xffffffff, 0x00040007,
1404 0x3c040, 0xffffffff, 0x00060005,
1405 0x3c044, 0xffffffff, 0x00090008,
1406 0x3c048, 0xffffffff, 0x00010000,
1407 0x3c04c, 0xffffffff, 0x00030002,
1408 0x3c050, 0xffffffff, 0x00040007,
1409 0x3c054, 0xffffffff, 0x00060005,
1410 0x3c058, 0xffffffff, 0x00090008,
1411 0x3c05c, 0xffffffff, 0x00010000,
1412 0x3c060, 0xffffffff, 0x00030002,
1413 0x3c064, 0xffffffff, 0x00040007,
1414 0x3c068, 0xffffffff, 0x00060005,
1415 0x3c06c, 0xffffffff, 0x00090008,
1416 0x3c070, 0xffffffff, 0x00010000,
1417 0x3c074, 0xffffffff, 0x00030002,
1418 0x3c078, 0xffffffff, 0x00040007,
1419 0x3c07c, 0xffffffff, 0x00060005,
1420 0x3c080, 0xffffffff, 0x00090008,
1421 0x3c084, 0xffffffff, 0x00010000,
1422 0x3c088, 0xffffffff, 0x00030002,
1423 0x3c08c, 0xffffffff, 0x00040007,
1424 0x3c090, 0xffffffff, 0x00060005,
1425 0x3c094, 0xffffffff, 0x00090008,
1426 0x3c098, 0xffffffff, 0x00010000,
1427 0x3c09c, 0xffffffff, 0x00030002,
1428 0x3c0a0, 0xffffffff, 0x00040007,
1429 0x3c0a4, 0xffffffff, 0x00060005,
1430 0x3c0a8, 0xffffffff, 0x00090008,
1431 0x3c0ac, 0xffffffff, 0x00010000,
1432 0x3c0b0, 0xffffffff, 0x00030002,
1433 0x3c0b4, 0xffffffff, 0x00040007,
1434 0x3c0b8, 0xffffffff, 0x00060005,
1435 0x3c0bc, 0xffffffff, 0x00090008,
1436 0x3c0c0, 0xffffffff, 0x00010000,
1437 0x3c0c4, 0xffffffff, 0x00030002,
1438 0x3c0c8, 0xffffffff, 0x00040007,
1439 0x3c0cc, 0xffffffff, 0x00060005,
1440 0x3c0d0, 0xffffffff, 0x00090008,
1441 0x3c0d4, 0xffffffff, 0x00010000,
1442 0x3c0d8, 0xffffffff, 0x00030002,
1443 0x3c0dc, 0xffffffff, 0x00040007,
1444 0x3c0e0, 0xffffffff, 0x00060005,
1445 0x3c0e4, 0xffffffff, 0x00090008,
1446 0x3c0e8, 0xffffffff, 0x00010000,
1447 0x3c0ec, 0xffffffff, 0x00030002,
1448 0x3c0f0, 0xffffffff, 0x00040007,
1449 0x3c0f4, 0xffffffff, 0x00060005,
1450 0x3c0f8, 0xffffffff, 0x00090008,
1451 0xc318, 0xffffffff, 0x00020200,
1452 0x3350, 0xffffffff, 0x00000200,
1453 0x15c0, 0xffffffff, 0x00000400,
1454 0x55e8, 0xffffffff, 0x00000000,
1455 0x2f50, 0xffffffff, 0x00000902,
1456 0x3c000, 0xffffffff, 0x96940200,
1457 0x8708, 0xffffffff, 0x00900100,
1458 0xc424, 0xffffffff, 0x0020003f,
1459 0x38, 0xffffffff, 0x0140001c,
1460 0x3c, 0x000f0000, 0x000f0000,
1461 0x220, 0xffffffff, 0xc060000c,
1462 0x224, 0xc0000fff, 0x00000100,
1463 0xf90, 0xffffffff, 0x00000100,
1464 0xf98, 0x00000101, 0x00000000,
1465 0x20a8, 0xffffffff, 0x00000104,
1466 0x55e4, 0xff000fff, 0x00000100,
1467 0x30cc, 0xc0000fff, 0x00000104,
1468 0xc1e4, 0x00000001, 0x00000001,
1469 0xd00c, 0xff000ff0, 0x00000100,
1470 0xd80c, 0xff000ff0, 0x00000100
1471};
1472
1305static void cik_init_golden_registers(struct radeon_device *rdev) 1473static void cik_init_golden_registers(struct radeon_device *rdev)
1306{ 1474{
1307 switch (rdev->family) { 1475 switch (rdev->family) {
@@ -1347,6 +1515,20 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
1347 spectre_golden_spm_registers, 1515 spectre_golden_spm_registers,
1348 (const u32)ARRAY_SIZE(spectre_golden_spm_registers)); 1516 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
1349 break; 1517 break;
1518 case CHIP_HAWAII:
1519 radeon_program_register_sequence(rdev,
1520 hawaii_mgcg_cgcg_init,
1521 (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
1522 radeon_program_register_sequence(rdev,
1523 hawaii_golden_registers,
1524 (const u32)ARRAY_SIZE(hawaii_golden_registers));
1525 radeon_program_register_sequence(rdev,
1526 hawaii_golden_common_registers,
1527 (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
1528 radeon_program_register_sequence(rdev,
1529 hawaii_golden_spm_registers,
1530 (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
1531 break;
1350 default: 1532 default:
1351 break; 1533 break;
1352 } 1534 }
@@ -1454,6 +1636,35 @@ static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
1454 {0x0000009f, 0x00b48000} 1636 {0x0000009f, 0x00b48000}
1455}; 1637};
1456 1638
1639#define HAWAII_IO_MC_REGS_SIZE 22
1640
1641static const u32 hawaii_io_mc_regs[HAWAII_IO_MC_REGS_SIZE][2] =
1642{
1643 {0x0000007d, 0x40000000},
1644 {0x0000007e, 0x40180304},
1645 {0x0000007f, 0x0000ff00},
1646 {0x00000081, 0x00000000},
1647 {0x00000083, 0x00000800},
1648 {0x00000086, 0x00000000},
1649 {0x00000087, 0x00000100},
1650 {0x00000088, 0x00020100},
1651 {0x00000089, 0x00000000},
1652 {0x0000008b, 0x00040000},
1653 {0x0000008c, 0x00000100},
1654 {0x0000008e, 0xff010000},
1655 {0x00000090, 0xffffefff},
1656 {0x00000091, 0xfff3efff},
1657 {0x00000092, 0xfff3efbf},
1658 {0x00000093, 0xf7ffffff},
1659 {0x00000094, 0xffffff7f},
1660 {0x00000095, 0x00000fff},
1661 {0x00000096, 0x00116fff},
1662 {0x00000097, 0x60010000},
1663 {0x00000098, 0x10010000},
1664 {0x0000009f, 0x00c79000}
1665};
1666
1667
1457/** 1668/**
1458 * cik_srbm_select - select specific register instances 1669 * cik_srbm_select - select specific register instances
1459 * 1670 *
@@ -1498,11 +1709,17 @@ static int ci_mc_load_microcode(struct radeon_device *rdev)
1498 1709
1499 switch (rdev->family) { 1710 switch (rdev->family) {
1500 case CHIP_BONAIRE: 1711 case CHIP_BONAIRE:
1501 default:
1502 io_mc_regs = (u32 *)&bonaire_io_mc_regs; 1712 io_mc_regs = (u32 *)&bonaire_io_mc_regs;
1503 ucode_size = CIK_MC_UCODE_SIZE; 1713 ucode_size = CIK_MC_UCODE_SIZE;
1504 regs_size = BONAIRE_IO_MC_REGS_SIZE; 1714 regs_size = BONAIRE_IO_MC_REGS_SIZE;
1505 break; 1715 break;
1716 case CHIP_HAWAII:
1717 io_mc_regs = (u32 *)&hawaii_io_mc_regs;
1718 ucode_size = HAWAII_MC_UCODE_SIZE;
1719 regs_size = HAWAII_IO_MC_REGS_SIZE;
1720 break;
1721 default:
1722 return -EINVAL;
1506 } 1723 }
1507 1724
1508 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; 1725 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1564,8 +1781,8 @@ static int cik_init_microcode(struct radeon_device *rdev)
1564{ 1781{
1565 const char *chip_name; 1782 const char *chip_name;
1566 size_t pfp_req_size, me_req_size, ce_req_size, 1783 size_t pfp_req_size, me_req_size, ce_req_size,
1567 mec_req_size, rlc_req_size, mc_req_size, 1784 mec_req_size, rlc_req_size, mc_req_size = 0,
1568 sdma_req_size, smc_req_size; 1785 sdma_req_size, smc_req_size = 0;
1569 char fw_name[30]; 1786 char fw_name[30];
1570 int err; 1787 int err;
1571 1788
@@ -1583,6 +1800,17 @@ static int cik_init_microcode(struct radeon_device *rdev)
1583 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1800 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1584 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4); 1801 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
1585 break; 1802 break;
1803 case CHIP_HAWAII:
1804 chip_name = "HAWAII";
1805 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1806 me_req_size = CIK_ME_UCODE_SIZE * 4;
1807 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1808 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1809 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
1810 mc_req_size = HAWAII_MC_UCODE_SIZE * 4;
1811 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1812 smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
1813 break;
1586 case CHIP_KAVERI: 1814 case CHIP_KAVERI:
1587 chip_name = "KAVERI"; 1815 chip_name = "KAVERI";
1588 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1816 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
@@ -1763,9 +1991,227 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
1763 1991
1764 num_pipe_configs = rdev->config.cik.max_tile_pipes; 1992 num_pipe_configs = rdev->config.cik.max_tile_pipes;
1765 if (num_pipe_configs > 8) 1993 if (num_pipe_configs > 8)
1766 num_pipe_configs = 8; /* ??? */ 1994 num_pipe_configs = 16;
1767 1995
1768 if (num_pipe_configs == 8) { 1996 if (num_pipe_configs == 16) {
1997 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1998 switch (reg_offset) {
1999 case 0:
2000 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2001 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2002 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2003 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2004 break;
2005 case 1:
2006 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2007 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2008 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2009 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2010 break;
2011 case 2:
2012 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2013 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2014 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2015 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2016 break;
2017 case 3:
2018 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2019 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2020 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2021 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2022 break;
2023 case 4:
2024 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2025 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2026 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2027 TILE_SPLIT(split_equal_to_row_size));
2028 break;
2029 case 5:
2030 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2031 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2032 break;
2033 case 6:
2034 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2035 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2036 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2037 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2038 break;
2039 case 7:
2040 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2041 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2042 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2043 TILE_SPLIT(split_equal_to_row_size));
2044 break;
2045 case 8:
2046 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2047 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
2048 break;
2049 case 9:
2050 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2051 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2052 break;
2053 case 10:
2054 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2055 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2056 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2057 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2058 break;
2059 case 11:
2060 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2061 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2062 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
2063 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2064 break;
2065 case 12:
2066 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2067 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2068 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2069 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2070 break;
2071 case 13:
2072 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2073 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2074 break;
2075 case 14:
2076 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2077 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2078 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2079 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2080 break;
2081 case 16:
2082 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2083 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2084 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
2085 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2086 break;
2087 case 17:
2088 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2089 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2090 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2091 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2092 break;
2093 case 27:
2094 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2095 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2096 break;
2097 case 28:
2098 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2099 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2100 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2101 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2102 break;
2103 case 29:
2104 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2105 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2106 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
2107 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2108 break;
2109 case 30:
2110 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2111 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2112 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2113 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2114 break;
2115 default:
2116 gb_tile_moden = 0;
2117 break;
2118 }
2119 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
2120 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2121 }
2122 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
2123 switch (reg_offset) {
2124 case 0:
2125 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2126 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2127 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2128 NUM_BANKS(ADDR_SURF_16_BANK));
2129 break;
2130 case 1:
2131 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2132 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2133 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2134 NUM_BANKS(ADDR_SURF_16_BANK));
2135 break;
2136 case 2:
2137 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2138 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2139 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2140 NUM_BANKS(ADDR_SURF_16_BANK));
2141 break;
2142 case 3:
2143 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2144 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2145 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2146 NUM_BANKS(ADDR_SURF_16_BANK));
2147 break;
2148 case 4:
2149 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2150 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2151 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2152 NUM_BANKS(ADDR_SURF_8_BANK));
2153 break;
2154 case 5:
2155 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2156 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2157 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2158 NUM_BANKS(ADDR_SURF_4_BANK));
2159 break;
2160 case 6:
2161 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2162 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2163 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2164 NUM_BANKS(ADDR_SURF_2_BANK));
2165 break;
2166 case 8:
2167 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2168 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2169 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2170 NUM_BANKS(ADDR_SURF_16_BANK));
2171 break;
2172 case 9:
2173 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2174 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2175 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2176 NUM_BANKS(ADDR_SURF_16_BANK));
2177 break;
2178 case 10:
2179 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2180 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2181 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2182 NUM_BANKS(ADDR_SURF_16_BANK));
2183 break;
2184 case 11:
2185 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2186 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2187 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2188 NUM_BANKS(ADDR_SURF_8_BANK));
2189 break;
2190 case 12:
2191 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2192 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2193 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2194 NUM_BANKS(ADDR_SURF_4_BANK));
2195 break;
2196 case 13:
2197 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2198 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2199 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2200 NUM_BANKS(ADDR_SURF_2_BANK));
2201 break;
2202 case 14:
2203 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2204 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2205 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2206 NUM_BANKS(ADDR_SURF_2_BANK));
2207 break;
2208 default:
2209 gb_tile_moden = 0;
2210 break;
2211 }
2212 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2213 }
2214 } else if (num_pipe_configs == 8) {
1769 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 2215 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1770 switch (reg_offset) { 2216 switch (reg_offset) {
1771 case 0: 2217 case 0:
@@ -2650,7 +3096,10 @@ static void cik_setup_rb(struct radeon_device *rdev,
2650 for (j = 0; j < sh_per_se; j++) { 3096 for (j = 0; j < sh_per_se; j++) {
2651 cik_select_se_sh(rdev, i, j); 3097 cik_select_se_sh(rdev, i, j);
2652 data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); 3098 data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
2653 disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH); 3099 if (rdev->family == CHIP_HAWAII)
3100 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
3101 else
3102 disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
2654 } 3103 }
2655 } 3104 }
2656 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 3105 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
@@ -2667,6 +3116,12 @@ static void cik_setup_rb(struct radeon_device *rdev,
2667 data = 0; 3116 data = 0;
2668 for (j = 0; j < sh_per_se; j++) { 3117 for (j = 0; j < sh_per_se; j++) {
2669 switch (enabled_rbs & 3) { 3118 switch (enabled_rbs & 3) {
3119 case 0:
3120 if (j == 0)
3121 data |= PKR_MAP(RASTER_CONFIG_RB_MAP_3);
3122 else
3123 data |= PKR_MAP(RASTER_CONFIG_RB_MAP_0);
3124 break;
2670 case 1: 3125 case 1:
2671 data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); 3126 data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
2672 break; 3127 break;
@@ -2719,6 +3174,23 @@ static void cik_gpu_init(struct radeon_device *rdev)
2719 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130; 3174 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
2720 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; 3175 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
2721 break; 3176 break;
3177 case CHIP_HAWAII:
3178 rdev->config.cik.max_shader_engines = 4;
3179 rdev->config.cik.max_tile_pipes = 16;
3180 rdev->config.cik.max_cu_per_sh = 11;
3181 rdev->config.cik.max_sh_per_se = 1;
3182 rdev->config.cik.max_backends_per_se = 4;
3183 rdev->config.cik.max_texture_channel_caches = 16;
3184 rdev->config.cik.max_gprs = 256;
3185 rdev->config.cik.max_gs_threads = 32;
3186 rdev->config.cik.max_hw_contexts = 8;
3187
3188 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
3189 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
3190 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
3191 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
3192 gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
3193 break;
2722 case CHIP_KAVERI: 3194 case CHIP_KAVERI:
2723 rdev->config.cik.max_shader_engines = 1; 3195 rdev->config.cik.max_shader_engines = 1;
2724 rdev->config.cik.max_tile_pipes = 4; 3196 rdev->config.cik.max_tile_pipes = 4;
@@ -3097,6 +3569,85 @@ void cik_semaphore_ring_emit(struct radeon_device *rdev,
3097 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); 3569 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
3098} 3570}
3099 3571
3572/**
3573 * cik_copy_cpdma - copy pages using the CP DMA engine
3574 *
3575 * @rdev: radeon_device pointer
3576 * @src_offset: src GPU address
3577 * @dst_offset: dst GPU address
3578 * @num_gpu_pages: number of GPU pages to xfer
3579 * @fence: radeon fence object
3580 *
3581 * Copy GPU paging using the CP DMA engine (CIK+).
3582 * Used by the radeon ttm implementation to move pages if
3583 * registered as the asic copy callback.
3584 */
3585int cik_copy_cpdma(struct radeon_device *rdev,
3586 uint64_t src_offset, uint64_t dst_offset,
3587 unsigned num_gpu_pages,
3588 struct radeon_fence **fence)
3589{
3590 struct radeon_semaphore *sem = NULL;
3591 int ring_index = rdev->asic->copy.blit_ring_index;
3592 struct radeon_ring *ring = &rdev->ring[ring_index];
3593 u32 size_in_bytes, cur_size_in_bytes, control;
3594 int i, num_loops;
3595 int r = 0;
3596
3597 r = radeon_semaphore_create(rdev, &sem);
3598 if (r) {
3599 DRM_ERROR("radeon: moving bo (%d).\n", r);
3600 return r;
3601 }
3602
3603 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
3604 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
3605 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
3606 if (r) {
3607 DRM_ERROR("radeon: moving bo (%d).\n", r);
3608 radeon_semaphore_free(rdev, &sem, NULL);
3609 return r;
3610 }
3611
3612 if (radeon_fence_need_sync(*fence, ring->idx)) {
3613 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3614 ring->idx);
3615 radeon_fence_note_sync(*fence, ring->idx);
3616 } else {
3617 radeon_semaphore_free(rdev, &sem, NULL);
3618 }
3619
3620 for (i = 0; i < num_loops; i++) {
3621 cur_size_in_bytes = size_in_bytes;
3622 if (cur_size_in_bytes > 0x1fffff)
3623 cur_size_in_bytes = 0x1fffff;
3624 size_in_bytes -= cur_size_in_bytes;
3625 control = 0;
3626 if (size_in_bytes == 0)
3627 control |= PACKET3_DMA_DATA_CP_SYNC;
3628 radeon_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
3629 radeon_ring_write(ring, control);
3630 radeon_ring_write(ring, lower_32_bits(src_offset));
3631 radeon_ring_write(ring, upper_32_bits(src_offset));
3632 radeon_ring_write(ring, lower_32_bits(dst_offset));
3633 radeon_ring_write(ring, upper_32_bits(dst_offset));
3634 radeon_ring_write(ring, cur_size_in_bytes);
3635 src_offset += cur_size_in_bytes;
3636 dst_offset += cur_size_in_bytes;
3637 }
3638
3639 r = radeon_fence_emit(rdev, fence, ring->idx);
3640 if (r) {
3641 radeon_ring_unlock_undo(rdev, ring);
3642 return r;
3643 }
3644
3645 radeon_ring_unlock_commit(rdev, ring);
3646 radeon_semaphore_free(rdev, &sem, *fence);
3647
3648 return r;
3649}
3650
3100/* 3651/*
3101 * IB stuff 3652 * IB stuff
3102 */ 3653 */
@@ -3403,7 +3954,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
3403 int r; 3954 int r;
3404 3955
3405 WREG32(CP_SEM_WAIT_TIMER, 0x0); 3956 WREG32(CP_SEM_WAIT_TIMER, 0x0);
3406 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 3957 if (rdev->family != CHIP_HAWAII)
3958 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3407 3959
3408 /* Set the write pointer delay */ 3960 /* Set the write pointer delay */
3409 WREG32(CP_RB_WPTR_DELAY, 0); 3961 WREG32(CP_RB_WPTR_DELAY, 0);
@@ -4740,12 +5292,17 @@ void cik_vm_fini(struct radeon_device *rdev)
4740static void cik_vm_decode_fault(struct radeon_device *rdev, 5292static void cik_vm_decode_fault(struct radeon_device *rdev,
4741 u32 status, u32 addr, u32 mc_client) 5293 u32 status, u32 addr, u32 mc_client)
4742{ 5294{
4743 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; 5295 u32 mc_id;
4744 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; 5296 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
4745 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; 5297 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
4746 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, 5298 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
4747 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; 5299 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
4748 5300
5301 if (rdev->family == CHIP_HAWAII)
5302 mc_id = (status & HAWAII_MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
5303 else
5304 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
5305
4749 printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", 5306 printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
4750 protections, vmid, addr, 5307 protections, vmid, addr,
4751 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read", 5308 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
@@ -4834,62 +5391,6 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4834 } 5391 }
4835} 5392}
4836 5393
4837/**
4838 * cik_vm_set_page - update the page tables using sDMA
4839 *
4840 * @rdev: radeon_device pointer
4841 * @ib: indirect buffer to fill with commands
4842 * @pe: addr of the page entry
4843 * @addr: dst addr to write into pe
4844 * @count: number of page entries to update
4845 * @incr: increase next addr by incr bytes
4846 * @flags: access flags
4847 *
4848 * Update the page tables using CP or sDMA (CIK).
4849 */
4850void cik_vm_set_page(struct radeon_device *rdev,
4851 struct radeon_ib *ib,
4852 uint64_t pe,
4853 uint64_t addr, unsigned count,
4854 uint32_t incr, uint32_t flags)
4855{
4856 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
4857 uint64_t value;
4858 unsigned ndw;
4859
4860 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
4861 /* CP */
4862 while (count) {
4863 ndw = 2 + count * 2;
4864 if (ndw > 0x3FFE)
4865 ndw = 0x3FFE;
4866
4867 ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
4868 ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
4869 WRITE_DATA_DST_SEL(1));
4870 ib->ptr[ib->length_dw++] = pe;
4871 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
4872 for (; ndw > 2; ndw -= 2, --count, pe += 8) {
4873 if (flags & RADEON_VM_PAGE_SYSTEM) {
4874 value = radeon_vm_map_gart(rdev, addr);
4875 value &= 0xFFFFFFFFFFFFF000ULL;
4876 } else if (flags & RADEON_VM_PAGE_VALID) {
4877 value = addr;
4878 } else {
4879 value = 0;
4880 }
4881 addr += incr;
4882 value |= r600_flags;
4883 ib->ptr[ib->length_dw++] = value;
4884 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4885 }
4886 }
4887 } else {
4888 /* DMA */
4889 cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
4890 }
4891}
4892
4893/* 5394/*
4894 * RLC 5395 * RLC
4895 * The RLC is a multi-purpose microengine that handles a 5396 * The RLC is a multi-purpose microengine that handles a
@@ -5058,6 +5559,7 @@ static int cik_rlc_resume(struct radeon_device *rdev)
5058 5559
5059 switch (rdev->family) { 5560 switch (rdev->family) {
5060 case CHIP_BONAIRE: 5561 case CHIP_BONAIRE:
5562 case CHIP_HAWAII:
5061 default: 5563 default:
5062 size = BONAIRE_RLC_UCODE_SIZE; 5564 size = BONAIRE_RLC_UCODE_SIZE;
5063 break; 5565 break;
@@ -5556,7 +6058,7 @@ void cik_init_cp_pg_table(struct radeon_device *rdev)
5556 } 6058 }
5557 6059
5558 for (i = 0; i < CP_ME_TABLE_SIZE; i ++) { 6060 for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
5559 dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]); 6061 dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
5560 } 6062 }
5561 bo_offset += CP_ME_TABLE_SIZE; 6063 bo_offset += CP_ME_TABLE_SIZE;
5562 } 6064 }
@@ -5778,52 +6280,57 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
5778 if (buffer == NULL) 6280 if (buffer == NULL)
5779 return; 6281 return;
5780 6282
5781 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0); 6283 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
5782 buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE; 6284 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
5783 6285
5784 buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1); 6286 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5785 buffer[count++] = 0x80000000; 6287 buffer[count++] = cpu_to_le32(0x80000000);
5786 buffer[count++] = 0x80000000; 6288 buffer[count++] = cpu_to_le32(0x80000000);
5787 6289
5788 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { 6290 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5789 for (ext = sect->section; ext->extent != NULL; ++ext) { 6291 for (ext = sect->section; ext->extent != NULL; ++ext) {
5790 if (sect->id == SECT_CONTEXT) { 6292 if (sect->id == SECT_CONTEXT) {
5791 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count); 6293 buffer[count++] =
5792 buffer[count++] = ext->reg_index - 0xa000; 6294 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
6295 buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
5793 for (i = 0; i < ext->reg_count; i++) 6296 for (i = 0; i < ext->reg_count; i++)
5794 buffer[count++] = ext->extent[i]; 6297 buffer[count++] = cpu_to_le32(ext->extent[i]);
5795 } else { 6298 } else {
5796 return; 6299 return;
5797 } 6300 }
5798 } 6301 }
5799 } 6302 }
5800 6303
5801 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2); 6304 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
5802 buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START; 6305 buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
5803 switch (rdev->family) { 6306 switch (rdev->family) {
5804 case CHIP_BONAIRE: 6307 case CHIP_BONAIRE:
5805 buffer[count++] = 0x16000012; 6308 buffer[count++] = cpu_to_le32(0x16000012);
5806 buffer[count++] = 0x00000000; 6309 buffer[count++] = cpu_to_le32(0x00000000);
5807 break; 6310 break;
5808 case CHIP_KAVERI: 6311 case CHIP_KAVERI:
5809 buffer[count++] = 0x00000000; /* XXX */ 6312 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
5810 buffer[count++] = 0x00000000; 6313 buffer[count++] = cpu_to_le32(0x00000000);
5811 break; 6314 break;
5812 case CHIP_KABINI: 6315 case CHIP_KABINI:
5813 buffer[count++] = 0x00000000; /* XXX */ 6316 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
5814 buffer[count++] = 0x00000000; 6317 buffer[count++] = cpu_to_le32(0x00000000);
6318 break;
6319 case CHIP_HAWAII:
6320 buffer[count++] = 0x3a00161a;
6321 buffer[count++] = 0x0000002e;
5815 break; 6322 break;
5816 default: 6323 default:
5817 buffer[count++] = 0x00000000; 6324 buffer[count++] = cpu_to_le32(0x00000000);
5818 buffer[count++] = 0x00000000; 6325 buffer[count++] = cpu_to_le32(0x00000000);
5819 break; 6326 break;
5820 } 6327 }
5821 6328
5822 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0); 6329 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
5823 buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE; 6330 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
5824 6331
5825 buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0); 6332 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
5826 buffer[count++] = 0; 6333 buffer[count++] = cpu_to_le32(0);
5827} 6334}
5828 6335
5829static void cik_init_pg(struct radeon_device *rdev) 6336static void cik_init_pg(struct radeon_device *rdev)
@@ -7118,7 +7625,7 @@ static int cik_startup(struct radeon_device *rdev)
7118 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 7625 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
7119 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 7626 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
7120 CP_RB0_RPTR, CP_RB0_WPTR, 7627 CP_RB0_RPTR, CP_RB0_WPTR,
7121 RADEON_CP_PACKET2); 7628 PACKET3(PACKET3_NOP, 0x3FFF));
7122 if (r) 7629 if (r)
7123 return r; 7630 return r;
7124 7631
@@ -7428,6 +7935,70 @@ void cik_fini(struct radeon_device *rdev)
7428 rdev->bios = NULL; 7935 rdev->bios = NULL;
7429} 7936}
7430 7937
7938void dce8_program_fmt(struct drm_encoder *encoder)
7939{
7940 struct drm_device *dev = encoder->dev;
7941 struct radeon_device *rdev = dev->dev_private;
7942 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
7943 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
7944 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
7945 int bpc = 0;
7946 u32 tmp = 0;
7947 enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
7948
7949 if (connector) {
7950 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
7951 bpc = radeon_get_monitor_bpc(connector);
7952 dither = radeon_connector->dither;
7953 }
7954
7955 /* LVDS/eDP FMT is set up by atom */
7956 if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
7957 return;
7958
7959 /* not needed for analog */
7960 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
7961 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
7962 return;
7963
7964 if (bpc == 0)
7965 return;
7966
7967 switch (bpc) {
7968 case 6:
7969 if (dither == RADEON_FMT_DITHER_ENABLE)
7970 /* XXX sort out optimal dither settings */
7971 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
7972 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(0));
7973 else
7974 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(0));
7975 break;
7976 case 8:
7977 if (dither == RADEON_FMT_DITHER_ENABLE)
7978 /* XXX sort out optimal dither settings */
7979 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
7980 FMT_RGB_RANDOM_ENABLE |
7981 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(1));
7982 else
7983 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(1));
7984 break;
7985 case 10:
7986 if (dither == RADEON_FMT_DITHER_ENABLE)
7987 /* XXX sort out optimal dither settings */
7988 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
7989 FMT_RGB_RANDOM_ENABLE |
7990 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(2));
7991 else
7992 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(2));
7993 break;
7994 default:
7995 /* not needed */
7996 break;
7997 }
7998
7999 WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
8000}
8001
7431/* display watermark setup */ 8002/* display watermark setup */
7432/** 8003/**
7433 * dce8_line_buffer_adjust - Set up the line buffer 8004 * dce8_line_buffer_adjust - Set up the line buffer
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index b6286068e111..9c9529de20ee 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -25,6 +25,7 @@
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h" 27#include "radeon_asic.h"
28#include "radeon_trace.h"
28#include "cikd.h" 29#include "cikd.h"
29 30
30/* sdma */ 31/* sdma */
@@ -101,14 +102,6 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
101{ 102{
102 struct radeon_ring *ring = &rdev->ring[fence->ring]; 103 struct radeon_ring *ring = &rdev->ring[fence->ring];
103 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; 104 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
104 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
105 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
106 u32 ref_and_mask;
107
108 if (fence->ring == R600_RING_TYPE_DMA_INDEX)
109 ref_and_mask = SDMA0;
110 else
111 ref_and_mask = SDMA1;
112 105
113 /* write the fence */ 106 /* write the fence */
114 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0)); 107 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
@@ -118,12 +111,12 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
118 /* generate an interrupt */ 111 /* generate an interrupt */
119 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0)); 112 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
120 /* flush HDP */ 113 /* flush HDP */
121 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); 114 /* We should be using the new POLL_REG_MEM special op packet here
122 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE); 115 * but it causes sDMA to hang sometimes
123 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ); 116 */
124 radeon_ring_write(ring, ref_and_mask); /* REFERENCE */ 117 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
125 radeon_ring_write(ring, ref_and_mask); /* MASK */ 118 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
126 radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */ 119 radeon_ring_write(ring, 0);
127} 120}
128 121
129/** 122/**
@@ -653,11 +646,12 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
653 uint64_t addr, unsigned count, 646 uint64_t addr, unsigned count,
654 uint32_t incr, uint32_t flags) 647 uint32_t incr, uint32_t flags)
655{ 648{
656 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
657 uint64_t value; 649 uint64_t value;
658 unsigned ndw; 650 unsigned ndw;
659 651
660 if (flags & RADEON_VM_PAGE_SYSTEM) { 652 trace_radeon_vm_set_page(pe, addr, count, incr, flags);
653
654 if (flags & R600_PTE_SYSTEM) {
661 while (count) { 655 while (count) {
662 ndw = count * 2; 656 ndw = count * 2;
663 if (ndw > 0xFFFFE) 657 if (ndw > 0xFFFFE)
@@ -669,16 +663,10 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
669 ib->ptr[ib->length_dw++] = upper_32_bits(pe); 663 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
670 ib->ptr[ib->length_dw++] = ndw; 664 ib->ptr[ib->length_dw++] = ndw;
671 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 665 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
672 if (flags & RADEON_VM_PAGE_SYSTEM) { 666 value = radeon_vm_map_gart(rdev, addr);
673 value = radeon_vm_map_gart(rdev, addr); 667 value &= 0xFFFFFFFFFFFFF000ULL;
674 value &= 0xFFFFFFFFFFFFF000ULL;
675 } else if (flags & RADEON_VM_PAGE_VALID) {
676 value = addr;
677 } else {
678 value = 0;
679 }
680 addr += incr; 668 addr += incr;
681 value |= r600_flags; 669 value |= flags;
682 ib->ptr[ib->length_dw++] = value; 670 ib->ptr[ib->length_dw++] = value;
683 ib->ptr[ib->length_dw++] = upper_32_bits(value); 671 ib->ptr[ib->length_dw++] = upper_32_bits(value);
684 } 672 }
@@ -689,7 +677,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
689 if (ndw > 0x7FFFF) 677 if (ndw > 0x7FFFF)
690 ndw = 0x7FFFF; 678 ndw = 0x7FFFF;
691 679
692 if (flags & RADEON_VM_PAGE_VALID) 680 if (flags & R600_PTE_VALID)
693 value = addr; 681 value = addr;
694 else 682 else
695 value = 0; 683 value = 0;
@@ -697,7 +685,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
697 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); 685 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
698 ib->ptr[ib->length_dw++] = pe; /* dst addr */ 686 ib->ptr[ib->length_dw++] = pe; /* dst addr */
699 ib->ptr[ib->length_dw++] = upper_32_bits(pe); 687 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
700 ib->ptr[ib->length_dw++] = r600_flags; /* mask */ 688 ib->ptr[ib->length_dw++] = flags; /* mask */
701 ib->ptr[ib->length_dw++] = 0; 689 ib->ptr[ib->length_dw++] = 0;
702 ib->ptr[ib->length_dw++] = value; /* value */ 690 ib->ptr[ib->length_dw++] = value; /* value */
703 ib->ptr[ib->length_dw++] = upper_32_bits(value); 691 ib->ptr[ib->length_dw++] = upper_32_bits(value);
@@ -724,18 +712,10 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
724void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 712void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
725{ 713{
726 struct radeon_ring *ring = &rdev->ring[ridx]; 714 struct radeon_ring *ring = &rdev->ring[ridx];
727 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
728 SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
729 u32 ref_and_mask;
730 715
731 if (vm == NULL) 716 if (vm == NULL)
732 return; 717 return;
733 718
734 if (ridx == R600_RING_TYPE_DMA_INDEX)
735 ref_and_mask = SDMA0;
736 else
737 ref_and_mask = SDMA1;
738
739 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); 719 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
740 if (vm->id < 8) { 720 if (vm->id < 8) {
741 radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); 721 radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
@@ -770,12 +750,12 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
770 radeon_ring_write(ring, VMID(0)); 750 radeon_ring_write(ring, VMID(0));
771 751
772 /* flush HDP */ 752 /* flush HDP */
773 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); 753 /* We should be using the new POLL_REG_MEM special op packet here
774 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE); 754 * but it causes sDMA to hang sometimes
775 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ); 755 */
776 radeon_ring_write(ring, ref_and_mask); /* REFERENCE */ 756 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
777 radeon_ring_write(ring, ref_and_mask); /* MASK */ 757 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
778 radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */ 758 radeon_ring_write(ring, 0);
779 759
780 /* flush TLB */ 760 /* flush TLB */
781 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); 761 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 203d2a09a1f5..5964af5e5b2d 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -25,8 +25,10 @@
25#define CIK_H 25#define CIK_H
26 26
27#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001 27#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
28#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
28 29
29#define CIK_RB_BITMAP_WIDTH_PER_SH 2 30#define CIK_RB_BITMAP_WIDTH_PER_SH 2
31#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4
30 32
31/* DIDT IND registers */ 33/* DIDT IND registers */
32#define DIDT_SQ_CTRL0 0x0 34#define DIDT_SQ_CTRL0 0x0
@@ -499,6 +501,7 @@
499 * bit 4: write 501 * bit 4: write
500 */ 502 */
501#define MEMORY_CLIENT_ID_MASK (0xff << 12) 503#define MEMORY_CLIENT_ID_MASK (0xff << 12)
504#define HAWAII_MEMORY_CLIENT_ID_MASK (0x1ff << 12)
502#define MEMORY_CLIENT_ID_SHIFT 12 505#define MEMORY_CLIENT_ID_SHIFT 12
503#define MEMORY_CLIENT_RW_MASK (1 << 24) 506#define MEMORY_CLIENT_RW_MASK (1 << 24)
504#define MEMORY_CLIENT_RW_SHIFT 24 507#define MEMORY_CLIENT_RW_SHIFT 24
@@ -906,6 +909,39 @@
906#define DPG_PIPE_STUTTER_CONTROL 0x6cd4 909#define DPG_PIPE_STUTTER_CONTROL 0x6cd4
907# define STUTTER_ENABLE (1 << 0) 910# define STUTTER_ENABLE (1 << 0)
908 911
912/* DCE8 FMT blocks */
913#define FMT_DYNAMIC_EXP_CNTL 0x6fb4
914# define FMT_DYNAMIC_EXP_EN (1 << 0)
915# define FMT_DYNAMIC_EXP_MODE (1 << 4)
916 /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
917#define FMT_CONTROL 0x6fb8
918# define FMT_PIXEL_ENCODING (1 << 16)
919 /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
920#define FMT_BIT_DEPTH_CONTROL 0x6fc8
921# define FMT_TRUNCATE_EN (1 << 0)
922# define FMT_TRUNCATE_MODE (1 << 1)
923# define FMT_TRUNCATE_DEPTH(x) ((x) << 4) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
924# define FMT_SPATIAL_DITHER_EN (1 << 8)
925# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
926# define FMT_SPATIAL_DITHER_DEPTH(x) ((x) << 11) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
927# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
928# define FMT_RGB_RANDOM_ENABLE (1 << 14)
929# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
930# define FMT_TEMPORAL_DITHER_EN (1 << 16)
931# define FMT_TEMPORAL_DITHER_DEPTH(x) ((x) << 17) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
932# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
933# define FMT_TEMPORAL_LEVEL (1 << 24)
934# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
935# define FMT_25FRC_SEL(x) ((x) << 26)
936# define FMT_50FRC_SEL(x) ((x) << 28)
937# define FMT_75FRC_SEL(x) ((x) << 30)
938#define FMT_CLAMP_CONTROL 0x6fe4
939# define FMT_CLAMP_DATA_EN (1 << 0)
940# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
941# define FMT_CLAMP_6BPC 0
942# define FMT_CLAMP_8BPC 1
943# define FMT_CLAMP_10BPC 2
944
909#define GRBM_CNTL 0x8000 945#define GRBM_CNTL 0x8000
910#define GRBM_READ_TIMEOUT(x) ((x) << 0) 946#define GRBM_READ_TIMEOUT(x) ((x) << 0)
911 947
@@ -1129,6 +1165,8 @@
1129# define ADDR_SURF_P8_32x32_16x16 12 1165# define ADDR_SURF_P8_32x32_16x16 12
1130# define ADDR_SURF_P8_32x32_16x32 13 1166# define ADDR_SURF_P8_32x32_16x32 13
1131# define ADDR_SURF_P8_32x64_32x32 14 1167# define ADDR_SURF_P8_32x64_32x32 14
1168# define ADDR_SURF_P16_32x32_8x16 16
1169# define ADDR_SURF_P16_32x32_16x16 17
1132# define TILE_SPLIT(x) ((x) << 11) 1170# define TILE_SPLIT(x) ((x) << 11)
1133# define ADDR_SURF_TILE_SPLIT_64B 0 1171# define ADDR_SURF_TILE_SPLIT_64B 0
1134# define ADDR_SURF_TILE_SPLIT_128B 1 1172# define ADDR_SURF_TILE_SPLIT_128B 1
@@ -1422,6 +1460,7 @@
1422# define RASTER_CONFIG_RB_MAP_1 1 1460# define RASTER_CONFIG_RB_MAP_1 1
1423# define RASTER_CONFIG_RB_MAP_2 2 1461# define RASTER_CONFIG_RB_MAP_2 2
1424# define RASTER_CONFIG_RB_MAP_3 3 1462# define RASTER_CONFIG_RB_MAP_3 3
1463#define PKR_MAP(x) ((x) << 8)
1425 1464
1426#define VGT_EVENT_INITIATOR 0x28a90 1465#define VGT_EVENT_INITIATOR 0x28a90
1427# define SAMPLE_STREAMOUTSTATS1 (1 << 0) 1466# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
@@ -1714,6 +1753,68 @@
1714# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28) 1753# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
1715# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28) 1754# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
1716#define PACKET3_DMA_DATA 0x50 1755#define PACKET3_DMA_DATA 0x50
1756/* 1. header
1757 * 2. CONTROL
1758 * 3. SRC_ADDR_LO or DATA [31:0]
1759 * 4. SRC_ADDR_HI [31:0]
1760 * 5. DST_ADDR_LO [31:0]
1761 * 6. DST_ADDR_HI [7:0]
1762 * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
1763 */
1764/* CONTROL */
1765# define PACKET3_DMA_DATA_ENGINE(x) ((x) << 0)
1766 /* 0 - ME
1767 * 1 - PFP
1768 */
1769# define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
1770 /* 0 - LRU
1771 * 1 - Stream
1772 * 2 - Bypass
1773 */
1774# define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
1775# define PACKET3_DMA_DATA_DST_SEL(x) ((x) << 20)
1776 /* 0 - DST_ADDR using DAS
1777 * 1 - GDS
1778 * 3 - DST_ADDR using L2
1779 */
1780# define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
1781 /* 0 - LRU
1782 * 1 - Stream
1783 * 2 - Bypass
1784 */
1785# define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
1786# define PACKET3_DMA_DATA_SRC_SEL(x) ((x) << 29)
1787 /* 0 - SRC_ADDR using SAS
1788 * 1 - GDS
1789 * 2 - DATA
1790 * 3 - SRC_ADDR using L2
1791 */
1792# define PACKET3_DMA_DATA_CP_SYNC (1 << 31)
1793/* COMMAND */
1794# define PACKET3_DMA_DATA_DIS_WC (1 << 21)
1795# define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
1796 /* 0 - none
1797 * 1 - 8 in 16
1798 * 2 - 8 in 32
1799 * 3 - 8 in 64
1800 */
1801# define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
1802 /* 0 - none
1803 * 1 - 8 in 16
1804 * 2 - 8 in 32
1805 * 3 - 8 in 64
1806 */
1807# define PACKET3_DMA_DATA_CMD_SAS (1 << 26)
1808 /* 0 - memory
1809 * 1 - register
1810 */
1811# define PACKET3_DMA_DATA_CMD_DAS (1 << 27)
1812 /* 0 - memory
1813 * 1 - register
1814 */
1815# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
1816# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
1817# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
1717#define PACKET3_AQUIRE_MEM 0x58 1818#define PACKET3_AQUIRE_MEM 0x58
1718#define PACKET3_REWIND 0x59 1819#define PACKET3_REWIND 0x59
1719#define PACKET3_LOAD_UCONFIG_REG 0x5E 1820#define PACKET3_LOAD_UCONFIG_REG 0x5E
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 9fcd338c0fcf..009f46e0ce72 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -102,6 +102,49 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
102 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); 102 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
103} 103}
104 104
105void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
106 struct drm_display_mode *mode)
107{
108 struct radeon_device *rdev = encoder->dev->dev_private;
109 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
110 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
111 struct drm_connector *connector;
112 struct radeon_connector *radeon_connector = NULL;
113 u32 tmp = 0, offset;
114
115 if (!dig->afmt->pin)
116 return;
117
118 offset = dig->afmt->pin->offset;
119
120 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
121 if (connector->encoder == encoder) {
122 radeon_connector = to_radeon_connector(connector);
123 break;
124 }
125 }
126
127 if (!radeon_connector) {
128 DRM_ERROR("Couldn't find encoder's connector\n");
129 return;
130 }
131
132 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
133 if (connector->latency_present[1])
134 tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
135 AUDIO_LIPSYNC(connector->audio_latency[1]);
136 else
137 tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
138 } else {
139 if (connector->latency_present[0])
140 tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
141 AUDIO_LIPSYNC(connector->audio_latency[0]);
142 else
143 tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
144 }
145 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
146}
147
105void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) 148void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
106{ 149{
107 struct radeon_device *rdev = encoder->dev->dev_private; 150 struct radeon_device *rdev = encoder->dev->dev_private;
@@ -113,9 +156,6 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
113 u8 *sadb; 156 u8 *sadb;
114 int sad_count; 157 int sad_count;
115 158
116 /* XXX: setting this register causes hangs on some asics */
117 return;
118
119 if (!dig->afmt->pin) 159 if (!dig->afmt->pin)
120 return; 160 return;
121 161
@@ -201,20 +241,30 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
201 241
202 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { 242 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
203 u32 value = 0; 243 u32 value = 0;
244 u8 stereo_freqs = 0;
245 int max_channels = -1;
204 int j; 246 int j;
205 247
206 for (j = 0; j < sad_count; j++) { 248 for (j = 0; j < sad_count; j++) {
207 struct cea_sad *sad = &sads[j]; 249 struct cea_sad *sad = &sads[j];
208 250
209 if (sad->format == eld_reg_to_type[i][1]) { 251 if (sad->format == eld_reg_to_type[i][1]) {
210 value = MAX_CHANNELS(sad->channels) | 252 if (sad->channels > max_channels) {
211 DESCRIPTOR_BYTE_2(sad->byte2) | 253 value = MAX_CHANNELS(sad->channels) |
212 SUPPORTED_FREQUENCIES(sad->freq); 254 DESCRIPTOR_BYTE_2(sad->byte2) |
255 SUPPORTED_FREQUENCIES(sad->freq);
256 max_channels = sad->channels;
257 }
258
213 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) 259 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
214 value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq); 260 stereo_freqs |= sad->freq;
215 break; 261 else
262 break;
216 } 263 }
217 } 264 }
265
266 value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
267
218 WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value); 268 WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
219 } 269 }
220 270
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b5c67a99dda9..52f1ae16f653 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1193,6 +1193,62 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
1193 } 1193 }
1194} 1194}
1195 1195
1196void dce4_program_fmt(struct drm_encoder *encoder)
1197{
1198 struct drm_device *dev = encoder->dev;
1199 struct radeon_device *rdev = dev->dev_private;
1200 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1201 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1202 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1203 int bpc = 0;
1204 u32 tmp = 0;
1205 enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
1206
1207 if (connector) {
1208 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1209 bpc = radeon_get_monitor_bpc(connector);
1210 dither = radeon_connector->dither;
1211 }
1212
1213 /* LVDS/eDP FMT is set up by atom */
1214 if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
1215 return;
1216
1217 /* not needed for analog */
1218 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
1219 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
1220 return;
1221
1222 if (bpc == 0)
1223 return;
1224
1225 switch (bpc) {
1226 case 6:
1227 if (dither == RADEON_FMT_DITHER_ENABLE)
1228 /* XXX sort out optimal dither settings */
1229 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
1230 FMT_SPATIAL_DITHER_EN);
1231 else
1232 tmp |= FMT_TRUNCATE_EN;
1233 break;
1234 case 8:
1235 if (dither == RADEON_FMT_DITHER_ENABLE)
1236 /* XXX sort out optimal dither settings */
1237 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
1238 FMT_RGB_RANDOM_ENABLE |
1239 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
1240 else
1241 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
1242 break;
1243 case 10:
1244 default:
1245 /* not needed */
1246 break;
1247 }
1248
1249 WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
1250}
1251
1196static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc) 1252static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
1197{ 1253{
1198 if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK) 1254 if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
@@ -3963,7 +4019,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
3963 if (rdev->family >= CHIP_TAHITI) { 4019 if (rdev->family >= CHIP_TAHITI) {
3964 /* SI */ 4020 /* SI */
3965 for (i = 0; i < rdev->rlc.reg_list_size; i++) 4021 for (i = 0; i < rdev->rlc.reg_list_size; i++)
3966 dst_ptr[i] = src_ptr[i]; 4022 dst_ptr[i] = cpu_to_le32(src_ptr[i]);
3967 } else { 4023 } else {
3968 /* ON/LN/TN */ 4024 /* ON/LN/TN */
3969 /* format: 4025 /* format:
@@ -3977,10 +4033,10 @@ int sumo_rlc_init(struct radeon_device *rdev)
3977 if (i < dws) 4033 if (i < dws)
3978 data |= (src_ptr[i] >> 2) << 16; 4034 data |= (src_ptr[i] >> 2) << 16;
3979 j = (((i - 1) * 3) / 2); 4035 j = (((i - 1) * 3) / 2);
3980 dst_ptr[j] = data; 4036 dst_ptr[j] = cpu_to_le32(data);
3981 } 4037 }
3982 j = ((i * 3) / 2); 4038 j = ((i * 3) / 2);
3983 dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER; 4039 dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER);
3984 } 4040 }
3985 radeon_bo_kunmap(rdev->rlc.save_restore_obj); 4041 radeon_bo_kunmap(rdev->rlc.save_restore_obj);
3986 radeon_bo_unreserve(rdev->rlc.save_restore_obj); 4042 radeon_bo_unreserve(rdev->rlc.save_restore_obj);
@@ -4042,40 +4098,40 @@ int sumo_rlc_init(struct radeon_device *rdev)
4042 cik_get_csb_buffer(rdev, dst_ptr); 4098 cik_get_csb_buffer(rdev, dst_ptr);
4043 } else if (rdev->family >= CHIP_TAHITI) { 4099 } else if (rdev->family >= CHIP_TAHITI) {
4044 reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256; 4100 reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
4045 dst_ptr[0] = upper_32_bits(reg_list_mc_addr); 4101 dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
4046 dst_ptr[1] = lower_32_bits(reg_list_mc_addr); 4102 dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
4047 dst_ptr[2] = rdev->rlc.clear_state_size; 4103 dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size);
4048 si_get_csb_buffer(rdev, &dst_ptr[(256/4)]); 4104 si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
4049 } else { 4105 } else {
4050 reg_list_hdr_blk_index = 0; 4106 reg_list_hdr_blk_index = 0;
4051 reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4); 4107 reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
4052 data = upper_32_bits(reg_list_mc_addr); 4108 data = upper_32_bits(reg_list_mc_addr);
4053 dst_ptr[reg_list_hdr_blk_index] = data; 4109 dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
4054 reg_list_hdr_blk_index++; 4110 reg_list_hdr_blk_index++;
4055 for (i = 0; cs_data[i].section != NULL; i++) { 4111 for (i = 0; cs_data[i].section != NULL; i++) {
4056 for (j = 0; cs_data[i].section[j].extent != NULL; j++) { 4112 for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
4057 reg_num = cs_data[i].section[j].reg_count; 4113 reg_num = cs_data[i].section[j].reg_count;
4058 data = reg_list_mc_addr & 0xffffffff; 4114 data = reg_list_mc_addr & 0xffffffff;
4059 dst_ptr[reg_list_hdr_blk_index] = data; 4115 dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
4060 reg_list_hdr_blk_index++; 4116 reg_list_hdr_blk_index++;
4061 4117
4062 data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff; 4118 data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
4063 dst_ptr[reg_list_hdr_blk_index] = data; 4119 dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
4064 reg_list_hdr_blk_index++; 4120 reg_list_hdr_blk_index++;
4065 4121
4066 data = 0x08000000 | (reg_num * 4); 4122 data = 0x08000000 | (reg_num * 4);
4067 dst_ptr[reg_list_hdr_blk_index] = data; 4123 dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
4068 reg_list_hdr_blk_index++; 4124 reg_list_hdr_blk_index++;
4069 4125
4070 for (k = 0; k < reg_num; k++) { 4126 for (k = 0; k < reg_num; k++) {
4071 data = cs_data[i].section[j].extent[k]; 4127 data = cs_data[i].section[j].extent[k];
4072 dst_ptr[reg_list_blk_index + k] = data; 4128 dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data);
4073 } 4129 }
4074 reg_list_mc_addr += reg_num * 4; 4130 reg_list_mc_addr += reg_num * 4;
4075 reg_list_blk_index += reg_num; 4131 reg_list_blk_index += reg_num;
4076 } 4132 }
4077 } 4133 }
4078 dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER; 4134 dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER);
4079 } 4135 }
4080 radeon_bo_kunmap(rdev->rlc.clear_state_obj); 4136 radeon_bo_kunmap(rdev->rlc.clear_state_obj);
4081 radeon_bo_unreserve(rdev->rlc.clear_state_obj); 4137 radeon_bo_unreserve(rdev->rlc.clear_state_obj);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 57fcc4b16a52..aa695c4feb3d 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -35,6 +35,8 @@
35extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder); 35extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder);
36extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder); 36extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder);
37extern void dce6_afmt_select_pin(struct drm_encoder *encoder); 37extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
38extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
39 struct drm_display_mode *mode);
38 40
39/* 41/*
40 * update the N and CTS parameters for a given pixel clock rate 42 * update the N and CTS parameters for a given pixel clock rate
@@ -58,6 +60,42 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
58 WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz); 60 WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
59} 61}
60 62
63static void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
64 struct drm_display_mode *mode)
65{
66 struct radeon_device *rdev = encoder->dev->dev_private;
67 struct drm_connector *connector;
68 struct radeon_connector *radeon_connector = NULL;
69 u32 tmp = 0;
70
71 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
72 if (connector->encoder == encoder) {
73 radeon_connector = to_radeon_connector(connector);
74 break;
75 }
76 }
77
78 if (!radeon_connector) {
79 DRM_ERROR("Couldn't find encoder's connector\n");
80 return;
81 }
82
83 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
84 if (connector->latency_present[1])
85 tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
86 AUDIO_LIPSYNC(connector->audio_latency[1]);
87 else
88 tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
89 } else {
90 if (connector->latency_present[0])
91 tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
92 AUDIO_LIPSYNC(connector->audio_latency[0]);
93 else
94 tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
95 }
96 WREG32(AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC, tmp);
97}
98
61static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder) 99static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
62{ 100{
63 struct radeon_device *rdev = encoder->dev->dev_private; 101 struct radeon_device *rdev = encoder->dev->dev_private;
@@ -67,12 +105,11 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
67 u8 *sadb; 105 u8 *sadb;
68 int sad_count; 106 int sad_count;
69 107
70 /* XXX: setting this register causes hangs on some asics */
71 return;
72
73 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 108 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
74 if (connector->encoder == encoder) 109 if (connector->encoder == encoder) {
75 radeon_connector = to_radeon_connector(connector); 110 radeon_connector = to_radeon_connector(connector);
111 break;
112 }
76 } 113 }
77 114
78 if (!radeon_connector) { 115 if (!radeon_connector) {
@@ -124,8 +161,10 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
124 }; 161 };
125 162
126 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 163 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
127 if (connector->encoder == encoder) 164 if (connector->encoder == encoder) {
128 radeon_connector = to_radeon_connector(connector); 165 radeon_connector = to_radeon_connector(connector);
166 break;
167 }
129 } 168 }
130 169
131 if (!radeon_connector) { 170 if (!radeon_connector) {
@@ -142,20 +181,30 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
142 181
143 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { 182 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
144 u32 value = 0; 183 u32 value = 0;
184 u8 stereo_freqs = 0;
185 int max_channels = -1;
145 int j; 186 int j;
146 187
147 for (j = 0; j < sad_count; j++) { 188 for (j = 0; j < sad_count; j++) {
148 struct cea_sad *sad = &sads[j]; 189 struct cea_sad *sad = &sads[j];
149 190
150 if (sad->format == eld_reg_to_type[i][1]) { 191 if (sad->format == eld_reg_to_type[i][1]) {
151 value = MAX_CHANNELS(sad->channels) | 192 if (sad->channels > max_channels) {
152 DESCRIPTOR_BYTE_2(sad->byte2) | 193 value = MAX_CHANNELS(sad->channels) |
153 SUPPORTED_FREQUENCIES(sad->freq); 194 DESCRIPTOR_BYTE_2(sad->byte2) |
195 SUPPORTED_FREQUENCIES(sad->freq);
196 max_channels = sad->channels;
197 }
198
154 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) 199 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
155 value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq); 200 stereo_freqs |= sad->freq;
156 break; 201 else
202 break;
157 } 203 }
158 } 204 }
205
206 value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
207
159 WREG32(eld_reg_to_type[i][0], value); 208 WREG32(eld_reg_to_type[i][0], value);
160 } 209 }
161 210
@@ -324,8 +373,10 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
324 if (ASIC_IS_DCE6(rdev)) { 373 if (ASIC_IS_DCE6(rdev)) {
325 dce6_afmt_select_pin(encoder); 374 dce6_afmt_select_pin(encoder);
326 dce6_afmt_write_sad_regs(encoder); 375 dce6_afmt_write_sad_regs(encoder);
376 dce6_afmt_write_latency_fields(encoder, mode);
327 } else { 377 } else {
328 evergreen_hdmi_write_sad_regs(encoder); 378 evergreen_hdmi_write_sad_regs(encoder);
379 dce4_afmt_write_latency_fields(encoder, mode);
329 } 380 }
330 381
331 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 382 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 4f6d2962767d..17f990798992 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -750,6 +750,44 @@
750 * bit6 = 192 kHz 750 * bit6 = 192 kHz
751 */ 751 */
752 752
753#define AZ_CHANNEL_COUNT_CONTROL 0x5fe4
754# define HBR_CHANNEL_COUNT(x) (((x) & 0x7) << 0)
755# define COMPRESSED_CHANNEL_COUNT(x) (((x) & 0x7) << 4)
756/* HBR_CHANNEL_COUNT, COMPRESSED_CHANNEL_COUNT
757 * 0 = use stream header
758 * 1-7 = channel count - 1
759 */
760#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC 0x5fe8
761# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
762# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
763/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
764 * 0 = invalid
765 * x = legal delay value
766 * 255 = sync not supported
767 */
768#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_HBR 0x5fec
769# define HBR_CAPABLE (1 << 0) /* enabled by default */
770
771#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION0 0x5ff4
772# define DISPLAY0_TYPE(x) (((x) & 0x3) << 0)
773# define DISPLAY_TYPE_NONE 0
774# define DISPLAY_TYPE_HDMI 1
775# define DISPLAY_TYPE_DP 2
776# define DISPLAY0_ID(x) (((x) & 0x3f) << 2)
777# define DISPLAY1_TYPE(x) (((x) & 0x3) << 8)
778# define DISPLAY1_ID(x) (((x) & 0x3f) << 10)
779# define DISPLAY2_TYPE(x) (((x) & 0x3) << 16)
780# define DISPLAY2_ID(x) (((x) & 0x3f) << 18)
781# define DISPLAY3_TYPE(x) (((x) & 0x3) << 24)
782# define DISPLAY3_ID(x) (((x) & 0x3f) << 26)
783#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION1 0x5ff8
784# define DISPLAY4_TYPE(x) (((x) & 0x3) << 0)
785# define DISPLAY4_ID(x) (((x) & 0x3f) << 2)
786# define DISPLAY5_TYPE(x) (((x) & 0x3) << 8)
787# define DISPLAY5_ID(x) (((x) & 0x3f) << 10)
788#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_NUMBER 0x5ffc
789# define NUMBER_OF_DISPLAY_ID(x) (((x) & 0x7) << 0)
790
753#define AZ_HOT_PLUG_CONTROL 0x5e78 791#define AZ_HOT_PLUG_CONTROL 0x5e78
754# define AZ_FORCE_CODEC_WAKE (1 << 0) 792# define AZ_FORCE_CODEC_WAKE (1 << 0)
755# define PIN0_JACK_DETECTION_ENABLE (1 << 4) 793# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
@@ -1312,6 +1350,38 @@
1312# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) 1350# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
1313# define DC_HPDx_EN (1 << 28) 1351# define DC_HPDx_EN (1 << 28)
1314 1352
1353/* DCE4/5/6 FMT blocks */
1354#define FMT_DYNAMIC_EXP_CNTL 0x6fb4
1355# define FMT_DYNAMIC_EXP_EN (1 << 0)
1356# define FMT_DYNAMIC_EXP_MODE (1 << 4)
1357 /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
1358#define FMT_CONTROL 0x6fb8
1359# define FMT_PIXEL_ENCODING (1 << 16)
1360 /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
1361#define FMT_BIT_DEPTH_CONTROL 0x6fc8
1362# define FMT_TRUNCATE_EN (1 << 0)
1363# define FMT_TRUNCATE_DEPTH (1 << 4)
1364# define FMT_SPATIAL_DITHER_EN (1 << 8)
1365# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
1366# define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
1367# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
1368# define FMT_RGB_RANDOM_ENABLE (1 << 14)
1369# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
1370# define FMT_TEMPORAL_DITHER_EN (1 << 16)
1371# define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
1372# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
1373# define FMT_TEMPORAL_LEVEL (1 << 24)
1374# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
1375# define FMT_25FRC_SEL(x) ((x) << 26)
1376# define FMT_50FRC_SEL(x) ((x) << 28)
1377# define FMT_75FRC_SEL(x) ((x) << 30)
1378#define FMT_CLAMP_CONTROL 0x6fe4
1379# define FMT_CLAMP_DATA_EN (1 << 0)
1380# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
1381# define FMT_CLAMP_6BPC 0
1382# define FMT_CLAMP_8BPC 1
1383# define FMT_CLAMP_10BPC 2
1384
1315/* ASYNC DMA */ 1385/* ASYNC DMA */
1316#define DMA_RB_RPTR 0xd008 1386#define DMA_RB_RPTR 0xd008
1317#define DMA_RB_WPTR 0xd00c 1387#define DMA_RB_WPTR 0xd00c
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cac2866d79da..11aab2ab54ce 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -174,11 +174,6 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
174extern void evergreen_program_aspm(struct radeon_device *rdev); 174extern void evergreen_program_aspm(struct radeon_device *rdev);
175extern void sumo_rlc_fini(struct radeon_device *rdev); 175extern void sumo_rlc_fini(struct radeon_device *rdev);
176extern int sumo_rlc_init(struct radeon_device *rdev); 176extern int sumo_rlc_init(struct radeon_device *rdev);
177extern void cayman_dma_vm_set_page(struct radeon_device *rdev,
178 struct radeon_ib *ib,
179 uint64_t pe,
180 uint64_t addr, unsigned count,
181 uint32_t incr, uint32_t flags);
182 177
183/* Firmware Names */ 178/* Firmware Names */
184MODULE_FIRMWARE("radeon/BARTS_pfp.bin"); 179MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -2400,77 +2395,6 @@ void cayman_vm_decode_fault(struct radeon_device *rdev,
2400 block, mc_id); 2395 block, mc_id);
2401} 2396}
2402 2397
2403#define R600_ENTRY_VALID (1 << 0)
2404#define R600_PTE_SYSTEM (1 << 1)
2405#define R600_PTE_SNOOPED (1 << 2)
2406#define R600_PTE_READABLE (1 << 5)
2407#define R600_PTE_WRITEABLE (1 << 6)
2408
2409uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
2410{
2411 uint32_t r600_flags = 0;
2412 r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
2413 r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
2414 r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
2415 if (flags & RADEON_VM_PAGE_SYSTEM) {
2416 r600_flags |= R600_PTE_SYSTEM;
2417 r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
2418 }
2419 return r600_flags;
2420}
2421
2422/**
2423 * cayman_vm_set_page - update the page tables using the CP
2424 *
2425 * @rdev: radeon_device pointer
2426 * @ib: indirect buffer to fill with commands
2427 * @pe: addr of the page entry
2428 * @addr: dst addr to write into pe
2429 * @count: number of page entries to update
2430 * @incr: increase next addr by incr bytes
2431 * @flags: access flags
2432 *
2433 * Update the page tables using the CP (cayman/TN).
2434 */
2435void cayman_vm_set_page(struct radeon_device *rdev,
2436 struct radeon_ib *ib,
2437 uint64_t pe,
2438 uint64_t addr, unsigned count,
2439 uint32_t incr, uint32_t flags)
2440{
2441 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
2442 uint64_t value;
2443 unsigned ndw;
2444
2445 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
2446 while (count) {
2447 ndw = 1 + count * 2;
2448 if (ndw > 0x3FFF)
2449 ndw = 0x3FFF;
2450
2451 ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
2452 ib->ptr[ib->length_dw++] = pe;
2453 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
2454 for (; ndw > 1; ndw -= 2, --count, pe += 8) {
2455 if (flags & RADEON_VM_PAGE_SYSTEM) {
2456 value = radeon_vm_map_gart(rdev, addr);
2457 value &= 0xFFFFFFFFFFFFF000ULL;
2458 } else if (flags & RADEON_VM_PAGE_VALID) {
2459 value = addr;
2460 } else {
2461 value = 0;
2462 }
2463 addr += incr;
2464 value |= r600_flags;
2465 ib->ptr[ib->length_dw++] = value;
2466 ib->ptr[ib->length_dw++] = upper_32_bits(value);
2467 }
2468 }
2469 } else {
2470 cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
2471 }
2472}
2473
2474/** 2398/**
2475 * cayman_vm_flush - vm flush using the CP 2399 * cayman_vm_flush - vm flush using the CP
2476 * 2400 *
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index dd6e9688fbef..bdeb65ed3658 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -24,6 +24,7 @@
24#include <drm/drmP.h> 24#include <drm/drmP.h>
25#include "radeon.h" 25#include "radeon.h"
26#include "radeon_asic.h" 26#include "radeon_asic.h"
27#include "radeon_trace.h"
27#include "nid.h" 28#include "nid.h"
28 29
29u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev); 30u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
@@ -245,8 +246,7 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
245 * @addr: dst addr to write into pe 246 * @addr: dst addr to write into pe
246 * @count: number of page entries to update 247 * @count: number of page entries to update
247 * @incr: increase next addr by incr bytes 248 * @incr: increase next addr by incr bytes
248 * @flags: access flags 249 * @flags: hw access flags
249 * @r600_flags: hw access flags
250 * 250 *
251 * Update the page tables using the DMA (cayman/TN). 251 * Update the page tables using the DMA (cayman/TN).
252 */ 252 */
@@ -256,11 +256,12 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
256 uint64_t addr, unsigned count, 256 uint64_t addr, unsigned count,
257 uint32_t incr, uint32_t flags) 257 uint32_t incr, uint32_t flags)
258{ 258{
259 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
260 uint64_t value; 259 uint64_t value;
261 unsigned ndw; 260 unsigned ndw;
262 261
263 if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) { 262 trace_radeon_vm_set_page(pe, addr, count, incr, flags);
263
264 if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
264 while (count) { 265 while (count) {
265 ndw = count * 2; 266 ndw = count * 2;
266 if (ndw > 0xFFFFE) 267 if (ndw > 0xFFFFE)
@@ -271,16 +272,16 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
271 ib->ptr[ib->length_dw++] = pe; 272 ib->ptr[ib->length_dw++] = pe;
272 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 273 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
273 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 274 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
274 if (flags & RADEON_VM_PAGE_SYSTEM) { 275 if (flags & R600_PTE_SYSTEM) {
275 value = radeon_vm_map_gart(rdev, addr); 276 value = radeon_vm_map_gart(rdev, addr);
276 value &= 0xFFFFFFFFFFFFF000ULL; 277 value &= 0xFFFFFFFFFFFFF000ULL;
277 } else if (flags & RADEON_VM_PAGE_VALID) { 278 } else if (flags & R600_PTE_VALID) {
278 value = addr; 279 value = addr;
279 } else { 280 } else {
280 value = 0; 281 value = 0;
281 } 282 }
282 addr += incr; 283 addr += incr;
283 value |= r600_flags; 284 value |= flags;
284 ib->ptr[ib->length_dw++] = value; 285 ib->ptr[ib->length_dw++] = value;
285 ib->ptr[ib->length_dw++] = upper_32_bits(value); 286 ib->ptr[ib->length_dw++] = upper_32_bits(value);
286 } 287 }
@@ -291,7 +292,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
291 if (ndw > 0xFFFFE) 292 if (ndw > 0xFFFFE)
292 ndw = 0xFFFFE; 293 ndw = 0xFFFFE;
293 294
294 if (flags & RADEON_VM_PAGE_VALID) 295 if (flags & R600_PTE_VALID)
295 value = addr; 296 value = addr;
296 else 297 else
297 value = 0; 298 value = 0;
@@ -299,7 +300,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
299 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); 300 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
300 ib->ptr[ib->length_dw++] = pe; /* dst addr */ 301 ib->ptr[ib->length_dw++] = pe; /* dst addr */
301 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 302 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
302 ib->ptr[ib->length_dw++] = r600_flags; /* mask */ 303 ib->ptr[ib->length_dw++] = flags; /* mask */
303 ib->ptr[ib->length_dw++] = 0; 304 ib->ptr[ib->length_dw++] = 0;
304 ib->ptr[ib->length_dw++] = value; /* value */ 305 ib->ptr[ib->length_dw++] = value; /* value */
305 ib->ptr[ib->length_dw++] = upper_32_bits(value); 306 ib->ptr[ib->length_dw++] = upper_32_bits(value);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index d71333033b2b..784983d78158 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1434,7 +1434,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1434 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 1434 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
1435 if (!obj) { 1435 if (!obj) {
1436 DRM_ERROR("cannot find crtc %d\n", crtc_id); 1436 DRM_ERROR("cannot find crtc %d\n", crtc_id);
1437 return -EINVAL; 1437 return -ENOENT;
1438 } 1438 }
1439 crtc = obj_to_crtc(obj); 1439 crtc = obj_to_crtc(obj);
1440 radeon_crtc = to_radeon_crtc(crtc); 1440 radeon_crtc = to_radeon_crtc(crtc);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f9be22062df1..4e609e8a8d2b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -124,6 +124,59 @@ int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
124 return 0; 124 return 0;
125} 125}
126 126
127void dce3_program_fmt(struct drm_encoder *encoder)
128{
129 struct drm_device *dev = encoder->dev;
130 struct radeon_device *rdev = dev->dev_private;
131 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
132 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
133 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
134 int bpc = 0;
135 u32 tmp = 0;
136 enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
137
138 if (connector) {
139 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
140 bpc = radeon_get_monitor_bpc(connector);
141 dither = radeon_connector->dither;
142 }
143
144 /* LVDS FMT is set up by atom */
145 if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
146 return;
147
148 /* not needed for analog */
149 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
150 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
151 return;
152
153 if (bpc == 0)
154 return;
155
156 switch (bpc) {
157 case 6:
158 if (dither == RADEON_FMT_DITHER_ENABLE)
159 /* XXX sort out optimal dither settings */
160 tmp |= FMT_SPATIAL_DITHER_EN;
161 else
162 tmp |= FMT_TRUNCATE_EN;
163 break;
164 case 8:
165 if (dither == RADEON_FMT_DITHER_ENABLE)
166 /* XXX sort out optimal dither settings */
167 tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
168 else
169 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
170 break;
171 case 10:
172 default:
173 /* not needed */
174 break;
175 }
176
177 WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
178}
179
127/* get temperature in millidegrees */ 180/* get temperature in millidegrees */
128int rv6xx_get_temp(struct radeon_device *rdev) 181int rv6xx_get_temp(struct radeon_device *rdev)
129{ 182{
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 01a3ec83f284..5dceea6f71ae 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -887,7 +887,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
887 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 887 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
888 if (!obj) { 888 if (!obj) {
889 DRM_ERROR("cannot find crtc %d\n", crtc_id); 889 DRM_ERROR("cannot find crtc %d\n", crtc_id);
890 return -EINVAL; 890 return -ENOENT;
891 } 891 }
892 crtc = obj_to_crtc(obj); 892 crtc = obj_to_crtc(obj);
893 radeon_crtc = to_radeon_crtc(crtc); 893 radeon_crtc = to_radeon_crtc(crtc);
@@ -2328,13 +2328,8 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2328 unsigned i; 2328 unsigned i;
2329 2329
2330 kfree(parser->relocs); 2330 kfree(parser->relocs);
2331 for (i = 0; i < parser->nchunks; i++) { 2331 for (i = 0; i < parser->nchunks; i++)
2332 kfree(parser->chunks[i].kdata); 2332 drm_free_large(parser->chunks[i].kdata);
2333 if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
2334 kfree(parser->chunks[i].kpage[0]);
2335 kfree(parser->chunks[i].kpage[1]);
2336 }
2337 }
2338 kfree(parser->chunks); 2333 kfree(parser->chunks);
2339 kfree(parser->chunks_array); 2334 kfree(parser->chunks_array);
2340} 2335}
@@ -2391,13 +2386,12 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
2391 ib_chunk = &parser.chunks[parser.chunk_ib_idx]; 2386 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
2392 parser.ib.length_dw = ib_chunk->length_dw; 2387 parser.ib.length_dw = ib_chunk->length_dw;
2393 *l = parser.ib.length_dw; 2388 *l = parser.ib.length_dw;
2394 r = r600_cs_parse(&parser); 2389 if (DRM_COPY_FROM_USER(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
2395 if (r) { 2390 r = -EFAULT;
2396 DRM_ERROR("Invalid command stream !\n");
2397 r600_cs_parser_fini(&parser, r); 2391 r600_cs_parser_fini(&parser, r);
2398 return r; 2392 return r;
2399 } 2393 }
2400 r = radeon_cs_finish_pages(&parser); 2394 r = r600_cs_parse(&parser);
2401 if (r) { 2395 if (r) {
2402 DRM_ERROR("Invalid command stream !\n"); 2396 DRM_ERROR("Invalid command stream !\n");
2403 r600_cs_parser_fini(&parser, r); 2397 r600_cs_parser_fini(&parser, r);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 06022e3b9c3b..4b89262f3f0e 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -24,6 +24,7 @@
24 * Authors: Christian König 24 * Authors: Christian König
25 */ 25 */
26#include <linux/hdmi.h> 26#include <linux/hdmi.h>
27#include <linux/gcd.h>
27#include <drm/drmP.h> 28#include <drm/drmP.h>
28#include <drm/radeon_drm.h> 29#include <drm/radeon_drm.h>
29#include "radeon.h" 30#include "radeon.h"
@@ -57,35 +58,57 @@ enum r600_hdmi_iec_status_bits {
57static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = { 58static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
58 /* 32kHz 44.1kHz 48kHz */ 59 /* 32kHz 44.1kHz 48kHz */
59 /* Clock N CTS N CTS N CTS */ 60 /* Clock N CTS N CTS N CTS */
60 { 25175, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */ 61 { 25175, 4096, 25175, 28224, 125875, 6144, 25175 }, /* 25,20/1.001 MHz */
61 { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */ 62 { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
62 { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */ 63 { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
63 { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */ 64 { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
64 { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */ 65 { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
65 { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */ 66 { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
66 { 74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */ 67 { 74176, 4096, 74176, 5733, 75335, 6144, 74176 }, /* 74.25/1.001 MHz */
67 { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */ 68 { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
68 { 148352, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */ 69 { 148352, 4096, 148352, 5733, 150670, 6144, 148352 }, /* 148.50/1.001 MHz */
69 { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */ 70 { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
70 { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
71}; 71};
72 72
73
73/* 74/*
74 * calculate CTS value if it's not found in the table 75 * calculate CTS and N values if they are not found in the table
75 */ 76 */
76static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq) 77static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
77{ 78{
78 u64 n; 79 int n, cts;
79 u32 d; 80 unsigned long div, mul;
80 81
81 if (*CTS == 0) { 82 /* Safe, but overly large values */
82 n = (u64)clock * (u64)N * 1000ULL; 83 n = 128 * freq;
83 d = 128 * freq; 84 cts = clock * 1000;
84 do_div(n, d); 85
85 *CTS = n; 86 /* Smallest valid fraction */
86 } 87 div = gcd(n, cts);
87 DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n", 88
88 N, *CTS, freq); 89 n /= div;
90 cts /= div;
91
92 /*
93 * The optimal N is 128*freq/1000. Calculate the closest larger
94 * value that doesn't truncate any bits.
95 */
96 mul = ((128*freq/1000) + (n-1))/n;
97
98 n *= mul;
99 cts *= mul;
100
101 /* Check that we are in spec (not always possible) */
102 if (n < (128*freq/1500))
103 printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
104 if (n > (128*freq/300))
105 printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
106
107 *N = n;
108 *CTS = cts;
109
110 DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
111 *N, *CTS, freq);
89} 112}
90 113
91struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock) 114struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
@@ -93,15 +116,16 @@ struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
93 struct radeon_hdmi_acr res; 116 struct radeon_hdmi_acr res;
94 u8 i; 117 u8 i;
95 118
96 for (i = 0; r600_hdmi_predefined_acr[i].clock != clock && 119 /* Precalculated values for common clocks */
97 r600_hdmi_predefined_acr[i].clock != 0; i++) 120 for (i = 0; i < ARRAY_SIZE(r600_hdmi_predefined_acr); i++) {
98 ; 121 if (r600_hdmi_predefined_acr[i].clock == clock)
99 res = r600_hdmi_predefined_acr[i]; 122 return r600_hdmi_predefined_acr[i];
123 }
100 124
101 /* In case some CTS are missing */ 125 /* And odd clocks get manually calculated */
102 r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000); 126 r600_hdmi_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
103 r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100); 127 r600_hdmi_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
104 r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000); 128 r600_hdmi_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
105 129
106 return res; 130 return res;
107} 131}
@@ -313,8 +337,10 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
313 return; 337 return;
314 338
315 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 339 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
316 if (connector->encoder == encoder) 340 if (connector->encoder == encoder) {
317 radeon_connector = to_radeon_connector(connector); 341 radeon_connector = to_radeon_connector(connector);
342 break;
343 }
318 } 344 }
319 345
320 if (!radeon_connector) { 346 if (!radeon_connector) {
@@ -366,8 +392,10 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
366 }; 392 };
367 393
368 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 394 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
369 if (connector->encoder == encoder) 395 if (connector->encoder == encoder) {
370 radeon_connector = to_radeon_connector(connector); 396 radeon_connector = to_radeon_connector(connector);
397 break;
398 }
371 } 399 }
372 400
373 if (!radeon_connector) { 401 if (!radeon_connector) {
@@ -384,20 +412,30 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
384 412
385 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { 413 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
386 u32 value = 0; 414 u32 value = 0;
415 u8 stereo_freqs = 0;
416 int max_channels = -1;
387 int j; 417 int j;
388 418
389 for (j = 0; j < sad_count; j++) { 419 for (j = 0; j < sad_count; j++) {
390 struct cea_sad *sad = &sads[j]; 420 struct cea_sad *sad = &sads[j];
391 421
392 if (sad->format == eld_reg_to_type[i][1]) { 422 if (sad->format == eld_reg_to_type[i][1]) {
393 value = MAX_CHANNELS(sad->channels) | 423 if (sad->channels > max_channels) {
394 DESCRIPTOR_BYTE_2(sad->byte2) | 424 value = MAX_CHANNELS(sad->channels) |
395 SUPPORTED_FREQUENCIES(sad->freq); 425 DESCRIPTOR_BYTE_2(sad->byte2) |
426 SUPPORTED_FREQUENCIES(sad->freq);
427 max_channels = sad->channels;
428 }
429
396 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) 430 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
397 value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq); 431 stereo_freqs |= sad->freq;
398 break; 432 else
433 break;
399 } 434 }
400 } 435 }
436
437 value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
438
401 WREG32(eld_reg_to_type[i][0], value); 439 WREG32(eld_reg_to_type[i][0], value);
402 } 440 }
403 441
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 7b3c7b5932c5..ebe38724a976 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1199,6 +1199,34 @@
1199# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29) 1199# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
1200# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30) 1200# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
1201 1201
1202/* DCE3 FMT blocks */
1203#define FMT_CONTROL 0x6700
1204# define FMT_PIXEL_ENCODING (1 << 16)
1205 /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
1206#define FMT_BIT_DEPTH_CONTROL 0x6710
1207# define FMT_TRUNCATE_EN (1 << 0)
1208# define FMT_TRUNCATE_DEPTH (1 << 4)
1209# define FMT_SPATIAL_DITHER_EN (1 << 8)
1210# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
1211# define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
1212# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
1213# define FMT_RGB_RANDOM_ENABLE (1 << 14)
1214# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
1215# define FMT_TEMPORAL_DITHER_EN (1 << 16)
1216# define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
1217# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
1218# define FMT_TEMPORAL_LEVEL (1 << 24)
1219# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
1220# define FMT_25FRC_SEL(x) ((x) << 26)
1221# define FMT_50FRC_SEL(x) ((x) << 28)
1222# define FMT_75FRC_SEL(x) ((x) << 30)
1223#define FMT_CLAMP_CONTROL 0x672c
1224# define FMT_CLAMP_DATA_EN (1 << 0)
1225# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
1226# define FMT_CLAMP_6BPC 0
1227# define FMT_CLAMP_8BPC 1
1228# define FMT_CLAMP_10BPC 2
1229
1202/* Power management */ 1230/* Power management */
1203#define CG_SPLL_FUNC_CNTL 0x600 1231#define CG_SPLL_FUNC_CNTL 0x600
1204# define SPLL_RESET (1 << 0) 1232# define SPLL_RESET (1 << 0)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 24f4960f59ee..b9ee99258602 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -98,6 +98,7 @@ extern int radeon_lockup_timeout;
98extern int radeon_fastfb; 98extern int radeon_fastfb;
99extern int radeon_dpm; 99extern int radeon_dpm;
100extern int radeon_aspm; 100extern int radeon_aspm;
101extern int radeon_runtime_pm;
101 102
102/* 103/*
103 * Copy from radeon_drv.h so we don't have to include both and have conflicting 104 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -327,7 +328,6 @@ struct radeon_fence_driver {
327 /* sync_seq is protected by ring emission lock */ 328 /* sync_seq is protected by ring emission lock */
328 uint64_t sync_seq[RADEON_NUM_RINGS]; 329 uint64_t sync_seq[RADEON_NUM_RINGS];
329 atomic64_t last_seq; 330 atomic64_t last_seq;
330 unsigned long last_activity;
331 bool initialized; 331 bool initialized;
332}; 332};
333 333
@@ -832,6 +832,12 @@ struct radeon_mec {
832#define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1) 832#define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
833#define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK) 833#define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
834 834
835#define R600_PTE_VALID (1 << 0)
836#define R600_PTE_SYSTEM (1 << 1)
837#define R600_PTE_SNOOPED (1 << 2)
838#define R600_PTE_READABLE (1 << 5)
839#define R600_PTE_WRITEABLE (1 << 6)
840
835struct radeon_vm { 841struct radeon_vm {
836 struct list_head list; 842 struct list_head list;
837 struct list_head va; 843 struct list_head va;
@@ -967,12 +973,8 @@ struct radeon_cs_reloc {
967struct radeon_cs_chunk { 973struct radeon_cs_chunk {
968 uint32_t chunk_id; 974 uint32_t chunk_id;
969 uint32_t length_dw; 975 uint32_t length_dw;
970 int kpage_idx[2];
971 uint32_t *kpage[2];
972 uint32_t *kdata; 976 uint32_t *kdata;
973 void __user *user_ptr; 977 void __user *user_ptr;
974 int last_copied_page;
975 int last_page_index;
976}; 978};
977 979
978struct radeon_cs_parser { 980struct radeon_cs_parser {
@@ -1007,8 +1009,15 @@ struct radeon_cs_parser {
1007 struct ww_acquire_ctx ticket; 1009 struct ww_acquire_ctx ticket;
1008}; 1010};
1009 1011
1010extern int radeon_cs_finish_pages(struct radeon_cs_parser *p); 1012static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
1011extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx); 1013{
1014 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
1015
1016 if (ibc->kdata)
1017 return ibc->kdata[idx];
1018 return p->ib.ptr[idx];
1019}
1020
1012 1021
1013struct radeon_cs_packet { 1022struct radeon_cs_packet {
1014 unsigned idx; 1023 unsigned idx;
@@ -1675,8 +1684,6 @@ struct radeon_asic {
1675 struct { 1684 struct {
1676 int (*init)(struct radeon_device *rdev); 1685 int (*init)(struct radeon_device *rdev);
1677 void (*fini)(struct radeon_device *rdev); 1686 void (*fini)(struct radeon_device *rdev);
1678
1679 u32 pt_ring_index;
1680 void (*set_page)(struct radeon_device *rdev, 1687 void (*set_page)(struct radeon_device *rdev,
1681 struct radeon_ib *ib, 1688 struct radeon_ib *ib,
1682 uint64_t pe, 1689 uint64_t pe,
@@ -2170,6 +2177,7 @@ struct radeon_device {
2170 bool need_dma32; 2177 bool need_dma32;
2171 bool accel_working; 2178 bool accel_working;
2172 bool fastfb_working; /* IGP feature*/ 2179 bool fastfb_working; /* IGP feature*/
2180 bool needs_reset;
2173 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; 2181 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
2174 const struct firmware *me_fw; /* all family ME firmware */ 2182 const struct firmware *me_fw; /* all family ME firmware */
2175 const struct firmware *pfp_fw; /* r6/700 PFP firmware */ 2183 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
@@ -2212,6 +2220,9 @@ struct radeon_device {
2212 /* clock, powergating flags */ 2220 /* clock, powergating flags */
2213 u32 cg_flags; 2221 u32 cg_flags;
2214 u32 pg_flags; 2222 u32 pg_flags;
2223
2224 struct dev_pm_domain vga_pm_domain;
2225 bool have_disp_power_ref;
2215}; 2226};
2216 2227
2217int radeon_device_init(struct radeon_device *rdev, 2228int radeon_device_init(struct radeon_device *rdev,
@@ -2673,8 +2684,8 @@ extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
2673extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); 2684extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
2674extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base); 2685extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
2675extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 2686extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
2676extern int radeon_resume_kms(struct drm_device *dev); 2687extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
2677extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 2688extern int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
2678extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); 2689extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
2679extern void radeon_program_register_sequence(struct radeon_device *rdev, 2690extern void radeon_program_register_sequence(struct radeon_device *rdev,
2680 const u32 *registers, 2691 const u32 *registers,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 8f7e04538fd6..50853c0cb49d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1622,8 +1622,7 @@ static struct radeon_asic cayman_asic = {
1622 .vm = { 1622 .vm = {
1623 .init = &cayman_vm_init, 1623 .init = &cayman_vm_init,
1624 .fini = &cayman_vm_fini, 1624 .fini = &cayman_vm_fini,
1625 .pt_ring_index = R600_RING_TYPE_DMA_INDEX, 1625 .set_page = &cayman_dma_vm_set_page,
1626 .set_page = &cayman_vm_set_page,
1627 }, 1626 },
1628 .ring = { 1627 .ring = {
1629 [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring, 1628 [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1723,8 +1722,7 @@ static struct radeon_asic trinity_asic = {
1723 .vm = { 1722 .vm = {
1724 .init = &cayman_vm_init, 1723 .init = &cayman_vm_init,
1725 .fini = &cayman_vm_fini, 1724 .fini = &cayman_vm_fini,
1726 .pt_ring_index = R600_RING_TYPE_DMA_INDEX, 1725 .set_page = &cayman_dma_vm_set_page,
1727 .set_page = &cayman_vm_set_page,
1728 }, 1726 },
1729 .ring = { 1727 .ring = {
1730 [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring, 1728 [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1854,8 +1852,7 @@ static struct radeon_asic si_asic = {
1854 .vm = { 1852 .vm = {
1855 .init = &si_vm_init, 1853 .init = &si_vm_init,
1856 .fini = &si_vm_fini, 1854 .fini = &si_vm_fini,
1857 .pt_ring_index = R600_RING_TYPE_DMA_INDEX, 1855 .set_page = &si_dma_vm_set_page,
1858 .set_page = &si_vm_set_page,
1859 }, 1856 },
1860 .ring = { 1857 .ring = {
1861 [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring, 1858 [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -1879,7 +1876,7 @@ static struct radeon_asic si_asic = {
1879 .hdmi_setmode = &evergreen_hdmi_setmode, 1876 .hdmi_setmode = &evergreen_hdmi_setmode,
1880 }, 1877 },
1881 .copy = { 1878 .copy = {
1882 .blit = NULL, 1879 .blit = &r600_copy_cpdma,
1883 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1880 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1884 .dma = &si_copy_dma, 1881 .dma = &si_copy_dma,
1885 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1882 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2000,8 +1997,7 @@ static struct radeon_asic ci_asic = {
2000 .vm = { 1997 .vm = {
2001 .init = &cik_vm_init, 1998 .init = &cik_vm_init,
2002 .fini = &cik_vm_fini, 1999 .fini = &cik_vm_fini,
2003 .pt_ring_index = R600_RING_TYPE_DMA_INDEX, 2000 .set_page = &cik_sdma_vm_set_page,
2004 .set_page = &cik_vm_set_page,
2005 }, 2001 },
2006 .ring = { 2002 .ring = {
2007 [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring, 2003 [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2100,8 +2096,7 @@ static struct radeon_asic kv_asic = {
2100 .vm = { 2096 .vm = {
2101 .init = &cik_vm_init, 2097 .init = &cik_vm_init,
2102 .fini = &cik_vm_fini, 2098 .fini = &cik_vm_fini,
2103 .pt_ring_index = R600_RING_TYPE_DMA_INDEX, 2099 .set_page = &cik_sdma_vm_set_page,
2104 .set_page = &cik_vm_set_page,
2105 }, 2100 },
2106 .ring = { 2101 .ring = {
2107 [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring, 2102 [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2442,27 +2437,48 @@ int radeon_asic_init(struct radeon_device *rdev)
2442 } 2437 }
2443 break; 2438 break;
2444 case CHIP_BONAIRE: 2439 case CHIP_BONAIRE:
2440 case CHIP_HAWAII:
2445 rdev->asic = &ci_asic; 2441 rdev->asic = &ci_asic;
2446 rdev->num_crtc = 6; 2442 rdev->num_crtc = 6;
2447 rdev->has_uvd = true; 2443 rdev->has_uvd = true;
2448 rdev->cg_flags = 2444 if (rdev->family == CHIP_BONAIRE) {
2449 RADEON_CG_SUPPORT_GFX_MGCG | 2445 rdev->cg_flags =
2450 RADEON_CG_SUPPORT_GFX_MGLS | 2446 RADEON_CG_SUPPORT_GFX_MGCG |
2451 /*RADEON_CG_SUPPORT_GFX_CGCG |*/ 2447 RADEON_CG_SUPPORT_GFX_MGLS |
2452 RADEON_CG_SUPPORT_GFX_CGLS | 2448 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2453 RADEON_CG_SUPPORT_GFX_CGTS | 2449 RADEON_CG_SUPPORT_GFX_CGLS |
2454 RADEON_CG_SUPPORT_GFX_CGTS_LS | 2450 RADEON_CG_SUPPORT_GFX_CGTS |
2455 RADEON_CG_SUPPORT_GFX_CP_LS | 2451 RADEON_CG_SUPPORT_GFX_CGTS_LS |
2456 RADEON_CG_SUPPORT_MC_LS | 2452 RADEON_CG_SUPPORT_GFX_CP_LS |
2457 RADEON_CG_SUPPORT_MC_MGCG | 2453 RADEON_CG_SUPPORT_MC_LS |
2458 RADEON_CG_SUPPORT_SDMA_MGCG | 2454 RADEON_CG_SUPPORT_MC_MGCG |
2459 RADEON_CG_SUPPORT_SDMA_LS | 2455 RADEON_CG_SUPPORT_SDMA_MGCG |
2460 RADEON_CG_SUPPORT_BIF_LS | 2456 RADEON_CG_SUPPORT_SDMA_LS |
2461 RADEON_CG_SUPPORT_VCE_MGCG | 2457 RADEON_CG_SUPPORT_BIF_LS |
2462 RADEON_CG_SUPPORT_UVD_MGCG | 2458 RADEON_CG_SUPPORT_VCE_MGCG |
2463 RADEON_CG_SUPPORT_HDP_LS | 2459 RADEON_CG_SUPPORT_UVD_MGCG |
2464 RADEON_CG_SUPPORT_HDP_MGCG; 2460 RADEON_CG_SUPPORT_HDP_LS |
2465 rdev->pg_flags = 0; 2461 RADEON_CG_SUPPORT_HDP_MGCG;
2462 rdev->pg_flags = 0;
2463 } else {
2464 rdev->cg_flags =
2465 RADEON_CG_SUPPORT_GFX_MGCG |
2466 RADEON_CG_SUPPORT_GFX_MGLS |
2467 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2468 RADEON_CG_SUPPORT_GFX_CGLS |
2469 RADEON_CG_SUPPORT_GFX_CGTS |
2470 RADEON_CG_SUPPORT_GFX_CP_LS |
2471 RADEON_CG_SUPPORT_MC_LS |
2472 RADEON_CG_SUPPORT_MC_MGCG |
2473 RADEON_CG_SUPPORT_SDMA_MGCG |
2474 RADEON_CG_SUPPORT_SDMA_LS |
2475 RADEON_CG_SUPPORT_BIF_LS |
2476 RADEON_CG_SUPPORT_VCE_MGCG |
2477 RADEON_CG_SUPPORT_UVD_MGCG |
2478 RADEON_CG_SUPPORT_HDP_LS |
2479 RADEON_CG_SUPPORT_HDP_MGCG;
2480 rdev->pg_flags = 0;
2481 }
2466 break; 2482 break;
2467 case CHIP_KAVERI: 2483 case CHIP_KAVERI:
2468 case CHIP_KABINI: 2484 case CHIP_KABINI:
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 70c29d5e080d..f2833ee3a613 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -581,17 +581,18 @@ int cayman_vm_init(struct radeon_device *rdev);
581void cayman_vm_fini(struct radeon_device *rdev); 581void cayman_vm_fini(struct radeon_device *rdev);
582void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 582void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
583uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags); 583uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
584void cayman_vm_set_page(struct radeon_device *rdev,
585 struct radeon_ib *ib,
586 uint64_t pe,
587 uint64_t addr, unsigned count,
588 uint32_t incr, uint32_t flags);
589int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 584int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
590int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 585int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
591void cayman_dma_ring_ib_execute(struct radeon_device *rdev, 586void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
592 struct radeon_ib *ib); 587 struct radeon_ib *ib);
593bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); 588bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
594bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); 589bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
590void cayman_dma_vm_set_page(struct radeon_device *rdev,
591 struct radeon_ib *ib,
592 uint64_t pe,
593 uint64_t addr, unsigned count,
594 uint32_t incr, uint32_t flags);
595
595void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 596void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
596 597
597int ni_dpm_init(struct radeon_device *rdev); 598int ni_dpm_init(struct radeon_device *rdev);
@@ -653,17 +654,17 @@ int si_irq_set(struct radeon_device *rdev);
653int si_irq_process(struct radeon_device *rdev); 654int si_irq_process(struct radeon_device *rdev);
654int si_vm_init(struct radeon_device *rdev); 655int si_vm_init(struct radeon_device *rdev);
655void si_vm_fini(struct radeon_device *rdev); 656void si_vm_fini(struct radeon_device *rdev);
656void si_vm_set_page(struct radeon_device *rdev,
657 struct radeon_ib *ib,
658 uint64_t pe,
659 uint64_t addr, unsigned count,
660 uint32_t incr, uint32_t flags);
661void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 657void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
662int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 658int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
663int si_copy_dma(struct radeon_device *rdev, 659int si_copy_dma(struct radeon_device *rdev,
664 uint64_t src_offset, uint64_t dst_offset, 660 uint64_t src_offset, uint64_t dst_offset,
665 unsigned num_gpu_pages, 661 unsigned num_gpu_pages,
666 struct radeon_fence **fence); 662 struct radeon_fence **fence);
663void si_dma_vm_set_page(struct radeon_device *rdev,
664 struct radeon_ib *ib,
665 uint64_t pe,
666 uint64_t addr, unsigned count,
667 uint32_t incr, uint32_t flags);
667void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 668void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
668u32 si_get_xclk(struct radeon_device *rdev); 669u32 si_get_xclk(struct radeon_device *rdev);
669uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev); 670uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -705,6 +706,10 @@ int cik_copy_dma(struct radeon_device *rdev,
705 uint64_t src_offset, uint64_t dst_offset, 706 uint64_t src_offset, uint64_t dst_offset,
706 unsigned num_gpu_pages, 707 unsigned num_gpu_pages,
707 struct radeon_fence **fence); 708 struct radeon_fence **fence);
709int cik_copy_cpdma(struct radeon_device *rdev,
710 uint64_t src_offset, uint64_t dst_offset,
711 unsigned num_gpu_pages,
712 struct radeon_fence **fence);
708int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); 713int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
709int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); 714int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
710bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); 715bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
@@ -731,11 +736,11 @@ int cik_irq_process(struct radeon_device *rdev);
731int cik_vm_init(struct radeon_device *rdev); 736int cik_vm_init(struct radeon_device *rdev);
732void cik_vm_fini(struct radeon_device *rdev); 737void cik_vm_fini(struct radeon_device *rdev);
733void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 738void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
734void cik_vm_set_page(struct radeon_device *rdev, 739void cik_sdma_vm_set_page(struct radeon_device *rdev,
735 struct radeon_ib *ib, 740 struct radeon_ib *ib,
736 uint64_t pe, 741 uint64_t pe,
737 uint64_t addr, unsigned count, 742 uint64_t addr, unsigned count,
738 uint32_t incr, uint32_t flags); 743 uint32_t incr, uint32_t flags);
739void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 744void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
740int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 745int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
741u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, 746u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index d96070bf8388..6153ec18943a 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -59,6 +59,10 @@ struct atpx_mux {
59 u16 mux; 59 u16 mux;
60} __packed; 60} __packed;
61 61
62bool radeon_is_px(void) {
63 return radeon_atpx_priv.atpx_detected;
64}
65
62/** 66/**
63 * radeon_atpx_call - call an ATPX method 67 * radeon_atpx_call - call an ATPX method
64 * 68 *
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 64565732cb98..20a768ac89a8 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -31,6 +31,8 @@
31#include "radeon.h" 31#include "radeon.h"
32#include "atom.h" 32#include "atom.h"
33 33
34#include <linux/pm_runtime.h>
35
34extern void 36extern void
35radeon_combios_connected_scratch_regs(struct drm_connector *connector, 37radeon_combios_connected_scratch_regs(struct drm_connector *connector,
36 struct drm_encoder *encoder, 38 struct drm_encoder *encoder,
@@ -411,6 +413,21 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
411 } 413 }
412 } 414 }
413 415
416 if (property == rdev->mode_info.dither_property) {
417 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
418 /* need to find digital encoder on connector */
419 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
420 if (!encoder)
421 return 0;
422
423 radeon_encoder = to_radeon_encoder(encoder);
424
425 if (radeon_connector->dither != val) {
426 radeon_connector->dither = val;
427 radeon_property_change_mode(&radeon_encoder->base);
428 }
429 }
430
414 if (property == rdev->mode_info.underscan_property) { 431 if (property == rdev->mode_info.underscan_property) {
415 /* need to find digital encoder on connector */ 432 /* need to find digital encoder on connector */
416 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); 433 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -626,6 +643,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
626 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 643 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
627 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 644 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
628 enum drm_connector_status ret = connector_status_disconnected; 645 enum drm_connector_status ret = connector_status_disconnected;
646 int r;
647
648 r = pm_runtime_get_sync(connector->dev->dev);
649 if (r < 0)
650 return connector_status_disconnected;
629 651
630 if (encoder) { 652 if (encoder) {
631 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 653 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -651,6 +673,8 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
651 /* check acpi lid status ??? */ 673 /* check acpi lid status ??? */
652 674
653 radeon_connector_update_scratch_regs(connector, ret); 675 radeon_connector_update_scratch_regs(connector, ret);
676 pm_runtime_mark_last_busy(connector->dev->dev);
677 pm_runtime_put_autosuspend(connector->dev->dev);
654 return ret; 678 return ret;
655} 679}
656 680
@@ -750,6 +774,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
750 struct drm_encoder_helper_funcs *encoder_funcs; 774 struct drm_encoder_helper_funcs *encoder_funcs;
751 bool dret = false; 775 bool dret = false;
752 enum drm_connector_status ret = connector_status_disconnected; 776 enum drm_connector_status ret = connector_status_disconnected;
777 int r;
778
779 r = pm_runtime_get_sync(connector->dev->dev);
780 if (r < 0)
781 return connector_status_disconnected;
753 782
754 encoder = radeon_best_single_encoder(connector); 783 encoder = radeon_best_single_encoder(connector);
755 if (!encoder) 784 if (!encoder)
@@ -790,9 +819,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
790 * detected a monitor via load. 819 * detected a monitor via load.
791 */ 820 */
792 if (radeon_connector->detected_by_load) 821 if (radeon_connector->detected_by_load)
793 return connector->status; 822 ret = connector->status;
794 else 823 goto out;
795 return ret;
796 } 824 }
797 825
798 if (radeon_connector->dac_load_detect && encoder) { 826 if (radeon_connector->dac_load_detect && encoder) {
@@ -817,6 +845,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
817 } 845 }
818 846
819 radeon_connector_update_scratch_regs(connector, ret); 847 radeon_connector_update_scratch_regs(connector, ret);
848
849out:
850 pm_runtime_mark_last_busy(connector->dev->dev);
851 pm_runtime_put_autosuspend(connector->dev->dev);
852
820 return ret; 853 return ret;
821} 854}
822 855
@@ -873,10 +906,15 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
873 struct drm_encoder_helper_funcs *encoder_funcs; 906 struct drm_encoder_helper_funcs *encoder_funcs;
874 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 907 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
875 enum drm_connector_status ret = connector_status_disconnected; 908 enum drm_connector_status ret = connector_status_disconnected;
909 int r;
876 910
877 if (!radeon_connector->dac_load_detect) 911 if (!radeon_connector->dac_load_detect)
878 return ret; 912 return ret;
879 913
914 r = pm_runtime_get_sync(connector->dev->dev);
915 if (r < 0)
916 return connector_status_disconnected;
917
880 encoder = radeon_best_single_encoder(connector); 918 encoder = radeon_best_single_encoder(connector);
881 if (!encoder) 919 if (!encoder)
882 ret = connector_status_disconnected; 920 ret = connector_status_disconnected;
@@ -887,6 +925,8 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
887 if (ret == connector_status_connected) 925 if (ret == connector_status_connected)
888 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); 926 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
889 radeon_connector_update_scratch_regs(connector, ret); 927 radeon_connector_update_scratch_regs(connector, ret);
928 pm_runtime_mark_last_busy(connector->dev->dev);
929 pm_runtime_put_autosuspend(connector->dev->dev);
890 return ret; 930 return ret;
891} 931}
892 932
@@ -954,12 +994,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
954 struct drm_encoder *encoder = NULL; 994 struct drm_encoder *encoder = NULL;
955 struct drm_encoder_helper_funcs *encoder_funcs; 995 struct drm_encoder_helper_funcs *encoder_funcs;
956 struct drm_mode_object *obj; 996 struct drm_mode_object *obj;
957 int i; 997 int i, r;
958 enum drm_connector_status ret = connector_status_disconnected; 998 enum drm_connector_status ret = connector_status_disconnected;
959 bool dret = false, broken_edid = false; 999 bool dret = false, broken_edid = false;
960 1000
961 if (!force && radeon_check_hpd_status_unchanged(connector)) 1001 r = pm_runtime_get_sync(connector->dev->dev);
962 return connector->status; 1002 if (r < 0)
1003 return connector_status_disconnected;
1004
1005 if (!force && radeon_check_hpd_status_unchanged(connector)) {
1006 ret = connector->status;
1007 goto exit;
1008 }
963 1009
964 if (radeon_connector->ddc_bus) 1010 if (radeon_connector->ddc_bus)
965 dret = radeon_ddc_probe(radeon_connector, false); 1011 dret = radeon_ddc_probe(radeon_connector, false);
@@ -1110,6 +1156,11 @@ out:
1110 1156
1111 /* updated in get modes as well since we need to know if it's analog or digital */ 1157 /* updated in get modes as well since we need to know if it's analog or digital */
1112 radeon_connector_update_scratch_regs(connector, ret); 1158 radeon_connector_update_scratch_regs(connector, ret);
1159
1160exit:
1161 pm_runtime_mark_last_busy(connector->dev->dev);
1162 pm_runtime_put_autosuspend(connector->dev->dev);
1163
1113 return ret; 1164 return ret;
1114} 1165}
1115 1166
@@ -1377,9 +1428,16 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1377 enum drm_connector_status ret = connector_status_disconnected; 1428 enum drm_connector_status ret = connector_status_disconnected;
1378 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; 1429 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
1379 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 1430 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1431 int r;
1380 1432
1381 if (!force && radeon_check_hpd_status_unchanged(connector)) 1433 r = pm_runtime_get_sync(connector->dev->dev);
1382 return connector->status; 1434 if (r < 0)
1435 return connector_status_disconnected;
1436
1437 if (!force && radeon_check_hpd_status_unchanged(connector)) {
1438 ret = connector->status;
1439 goto out;
1440 }
1383 1441
1384 if (radeon_connector->edid) { 1442 if (radeon_connector->edid) {
1385 kfree(radeon_connector->edid); 1443 kfree(radeon_connector->edid);
@@ -1443,6 +1501,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1443 } 1501 }
1444 1502
1445 radeon_connector_update_scratch_regs(connector, ret); 1503 radeon_connector_update_scratch_regs(connector, ret);
1504out:
1505 pm_runtime_mark_last_busy(connector->dev->dev);
1506 pm_runtime_put_autosuspend(connector->dev->dev);
1507
1446 return ret; 1508 return ret;
1447} 1509}
1448 1510
@@ -1658,12 +1720,16 @@ radeon_add_atom_connector(struct drm_device *dev,
1658 drm_object_attach_property(&radeon_connector->base.base, 1720 drm_object_attach_property(&radeon_connector->base.base,
1659 rdev->mode_info.underscan_vborder_property, 1721 rdev->mode_info.underscan_vborder_property,
1660 0); 1722 0);
1723
1724 drm_object_attach_property(&radeon_connector->base.base,
1725 rdev->mode_info.dither_property,
1726 RADEON_FMT_DITHER_DISABLE);
1727
1661 if (radeon_audio != 0) 1728 if (radeon_audio != 0)
1662 drm_object_attach_property(&radeon_connector->base.base, 1729 drm_object_attach_property(&radeon_connector->base.base,
1663 rdev->mode_info.audio_property, 1730 rdev->mode_info.audio_property,
1664 (radeon_audio == 1) ? 1731 RADEON_AUDIO_AUTO);
1665 RADEON_AUDIO_AUTO : 1732
1666 RADEON_AUDIO_DISABLE);
1667 subpixel_order = SubPixelHorizontalRGB; 1733 subpixel_order = SubPixelHorizontalRGB;
1668 connector->interlace_allowed = true; 1734 connector->interlace_allowed = true;
1669 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) 1735 if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1760,9 +1826,12 @@ radeon_add_atom_connector(struct drm_device *dev,
1760 if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) { 1826 if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
1761 drm_object_attach_property(&radeon_connector->base.base, 1827 drm_object_attach_property(&radeon_connector->base.base,
1762 rdev->mode_info.audio_property, 1828 rdev->mode_info.audio_property,
1763 (radeon_audio == 1) ? 1829 RADEON_AUDIO_AUTO);
1764 RADEON_AUDIO_AUTO : 1830 }
1765 RADEON_AUDIO_DISABLE); 1831 if (ASIC_IS_AVIVO(rdev)) {
1832 drm_object_attach_property(&radeon_connector->base.base,
1833 rdev->mode_info.dither_property,
1834 RADEON_FMT_DITHER_DISABLE);
1766 } 1835 }
1767 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1836 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1768 radeon_connector->dac_load_detect = true; 1837 radeon_connector->dac_load_detect = true;
@@ -1807,9 +1876,12 @@ radeon_add_atom_connector(struct drm_device *dev,
1807 if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) { 1876 if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
1808 drm_object_attach_property(&radeon_connector->base.base, 1877 drm_object_attach_property(&radeon_connector->base.base,
1809 rdev->mode_info.audio_property, 1878 rdev->mode_info.audio_property,
1810 (radeon_audio == 1) ? 1879 RADEON_AUDIO_AUTO);
1811 RADEON_AUDIO_AUTO : 1880 }
1812 RADEON_AUDIO_DISABLE); 1881 if (ASIC_IS_AVIVO(rdev)) {
1882 drm_object_attach_property(&radeon_connector->base.base,
1883 rdev->mode_info.dither_property,
1884 RADEON_FMT_DITHER_DISABLE);
1813 } 1885 }
1814 subpixel_order = SubPixelHorizontalRGB; 1886 subpixel_order = SubPixelHorizontalRGB;
1815 connector->interlace_allowed = true; 1887 connector->interlace_allowed = true;
@@ -1853,9 +1925,13 @@ radeon_add_atom_connector(struct drm_device *dev,
1853 if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) { 1925 if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
1854 drm_object_attach_property(&radeon_connector->base.base, 1926 drm_object_attach_property(&radeon_connector->base.base,
1855 rdev->mode_info.audio_property, 1927 rdev->mode_info.audio_property,
1856 (radeon_audio == 1) ? 1928 RADEON_AUDIO_AUTO);
1857 RADEON_AUDIO_AUTO : 1929 }
1858 RADEON_AUDIO_DISABLE); 1930 if (ASIC_IS_AVIVO(rdev)) {
1931 drm_object_attach_property(&radeon_connector->base.base,
1932 rdev->mode_info.dither_property,
1933 RADEON_FMT_DITHER_DISABLE);
1934
1859 } 1935 }
1860 connector->interlace_allowed = true; 1936 connector->interlace_allowed = true;
1861 /* in theory with a DP to VGA converter... */ 1937 /* in theory with a DP to VGA converter... */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 80285e35bc65..26ca223d12d6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -212,9 +212,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
212 return -EFAULT; 212 return -EFAULT;
213 } 213 }
214 p->chunks[i].length_dw = user_chunk.length_dw; 214 p->chunks[i].length_dw = user_chunk.length_dw;
215 p->chunks[i].kdata = NULL;
216 p->chunks[i].chunk_id = user_chunk.chunk_id; 215 p->chunks[i].chunk_id = user_chunk.chunk_id;
217 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
218 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { 216 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
219 p->chunk_relocs_idx = i; 217 p->chunk_relocs_idx = i;
220 } 218 }
@@ -237,25 +235,31 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
237 return -EINVAL; 235 return -EINVAL;
238 } 236 }
239 237
240 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; 238 size = p->chunks[i].length_dw;
241 if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) || 239 cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
242 (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) { 240 p->chunks[i].user_ptr = cdata;
243 size = p->chunks[i].length_dw * sizeof(uint32_t); 241 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
244 p->chunks[i].kdata = kmalloc(size, GFP_KERNEL); 242 continue;
245 if (p->chunks[i].kdata == NULL) { 243
246 return -ENOMEM; 244 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
247 } 245 if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
248 if (DRM_COPY_FROM_USER(p->chunks[i].kdata, 246 continue;
249 p->chunks[i].user_ptr, size)) { 247 }
250 return -EFAULT; 248
251 } 249 p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
252 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { 250 size *= sizeof(uint32_t);
253 p->cs_flags = p->chunks[i].kdata[0]; 251 if (p->chunks[i].kdata == NULL) {
254 if (p->chunks[i].length_dw > 1) 252 return -ENOMEM;
255 ring = p->chunks[i].kdata[1]; 253 }
256 if (p->chunks[i].length_dw > 2) 254 if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
257 priority = (s32)p->chunks[i].kdata[2]; 255 return -EFAULT;
258 } 256 }
257 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
258 p->cs_flags = p->chunks[i].kdata[0];
259 if (p->chunks[i].length_dw > 1)
260 ring = p->chunks[i].kdata[1];
261 if (p->chunks[i].length_dw > 2)
262 priority = (s32)p->chunks[i].kdata[2];
259 } 263 }
260 } 264 }
261 265
@@ -278,34 +282,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
278 } 282 }
279 } 283 }
280 284
281 /* deal with non-vm */
282 if ((p->chunk_ib_idx != -1) &&
283 ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
284 (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
285 if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
286 DRM_ERROR("cs IB too big: %d\n",
287 p->chunks[p->chunk_ib_idx].length_dw);
288 return -EINVAL;
289 }
290 if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
291 p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
292 p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
293 if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
294 p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
295 kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
296 kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
297 p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
298 p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
299 return -ENOMEM;
300 }
301 }
302 p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
303 p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
304 p->chunks[p->chunk_ib_idx].last_copied_page = -1;
305 p->chunks[p->chunk_ib_idx].last_page_index =
306 ((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
307 }
308
309 return 0; 285 return 0;
310} 286}
311 287
@@ -339,13 +315,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
339 kfree(parser->track); 315 kfree(parser->track);
340 kfree(parser->relocs); 316 kfree(parser->relocs);
341 kfree(parser->relocs_ptr); 317 kfree(parser->relocs_ptr);
342 for (i = 0; i < parser->nchunks; i++) { 318 for (i = 0; i < parser->nchunks; i++)
343 kfree(parser->chunks[i].kdata); 319 drm_free_large(parser->chunks[i].kdata);
344 if ((parser->rdev->flags & RADEON_IS_AGP)) {
345 kfree(parser->chunks[i].kpage[0]);
346 kfree(parser->chunks[i].kpage[1]);
347 }
348 }
349 kfree(parser->chunks); 320 kfree(parser->chunks);
350 kfree(parser->chunks_array); 321 kfree(parser->chunks_array);
351 radeon_ib_free(parser->rdev, &parser->ib); 322 radeon_ib_free(parser->rdev, &parser->ib);
@@ -355,7 +326,6 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
355static int radeon_cs_ib_chunk(struct radeon_device *rdev, 326static int radeon_cs_ib_chunk(struct radeon_device *rdev,
356 struct radeon_cs_parser *parser) 327 struct radeon_cs_parser *parser)
357{ 328{
358 struct radeon_cs_chunk *ib_chunk;
359 int r; 329 int r;
360 330
361 if (parser->chunk_ib_idx == -1) 331 if (parser->chunk_ib_idx == -1)
@@ -364,28 +334,11 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
364 if (parser->cs_flags & RADEON_CS_USE_VM) 334 if (parser->cs_flags & RADEON_CS_USE_VM)
365 return 0; 335 return 0;
366 336
367 ib_chunk = &parser->chunks[parser->chunk_ib_idx];
368 /* Copy the packet into the IB, the parser will read from the
369 * input memory (cached) and write to the IB (which can be
370 * uncached).
371 */
372 r = radeon_ib_get(rdev, parser->ring, &parser->ib,
373 NULL, ib_chunk->length_dw * 4);
374 if (r) {
375 DRM_ERROR("Failed to get ib !\n");
376 return r;
377 }
378 parser->ib.length_dw = ib_chunk->length_dw;
379 r = radeon_cs_parse(rdev, parser->ring, parser); 337 r = radeon_cs_parse(rdev, parser->ring, parser);
380 if (r || parser->parser_error) { 338 if (r || parser->parser_error) {
381 DRM_ERROR("Invalid command stream !\n"); 339 DRM_ERROR("Invalid command stream !\n");
382 return r; 340 return r;
383 } 341 }
384 r = radeon_cs_finish_pages(parser);
385 if (r) {
386 DRM_ERROR("Invalid command stream !\n");
387 return r;
388 }
389 342
390 if (parser->ring == R600_RING_TYPE_UVD_INDEX) 343 if (parser->ring == R600_RING_TYPE_UVD_INDEX)
391 radeon_uvd_note_usage(rdev); 344 radeon_uvd_note_usage(rdev);
@@ -423,7 +376,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
423static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, 376static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
424 struct radeon_cs_parser *parser) 377 struct radeon_cs_parser *parser)
425{ 378{
426 struct radeon_cs_chunk *ib_chunk;
427 struct radeon_fpriv *fpriv = parser->filp->driver_priv; 379 struct radeon_fpriv *fpriv = parser->filp->driver_priv;
428 struct radeon_vm *vm = &fpriv->vm; 380 struct radeon_vm *vm = &fpriv->vm;
429 int r; 381 int r;
@@ -433,49 +385,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
433 if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) 385 if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
434 return 0; 386 return 0;
435 387
436 if ((rdev->family >= CHIP_TAHITI) && 388 if (parser->const_ib.length_dw) {
437 (parser->chunk_const_ib_idx != -1)) {
438 ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
439 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
440 DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
441 return -EINVAL;
442 }
443 r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
444 vm, ib_chunk->length_dw * 4);
445 if (r) {
446 DRM_ERROR("Failed to get const ib !\n");
447 return r;
448 }
449 parser->const_ib.is_const_ib = true;
450 parser->const_ib.length_dw = ib_chunk->length_dw;
451 /* Copy the packet into the IB */
452 if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
453 ib_chunk->length_dw * 4)) {
454 return -EFAULT;
455 }
456 r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib); 389 r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
457 if (r) { 390 if (r) {
458 return r; 391 return r;
459 } 392 }
460 } 393 }
461 394
462 ib_chunk = &parser->chunks[parser->chunk_ib_idx];
463 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
464 DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
465 return -EINVAL;
466 }
467 r = radeon_ib_get(rdev, parser->ring, &parser->ib,
468 vm, ib_chunk->length_dw * 4);
469 if (r) {
470 DRM_ERROR("Failed to get ib !\n");
471 return r;
472 }
473 parser->ib.length_dw = ib_chunk->length_dw;
474 /* Copy the packet into the IB */
475 if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
476 ib_chunk->length_dw * 4)) {
477 return -EFAULT;
478 }
479 r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib); 395 r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
480 if (r) { 396 if (r) {
481 return r; 397 return r;
@@ -527,6 +443,62 @@ static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
527 return r; 443 return r;
528} 444}
529 445
446static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
447{
448 struct radeon_cs_chunk *ib_chunk;
449 struct radeon_vm *vm = NULL;
450 int r;
451
452 if (parser->chunk_ib_idx == -1)
453 return 0;
454
455 if (parser->cs_flags & RADEON_CS_USE_VM) {
456 struct radeon_fpriv *fpriv = parser->filp->driver_priv;
457 vm = &fpriv->vm;
458
459 if ((rdev->family >= CHIP_TAHITI) &&
460 (parser->chunk_const_ib_idx != -1)) {
461 ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
462 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
463 DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
464 return -EINVAL;
465 }
466 r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
467 vm, ib_chunk->length_dw * 4);
468 if (r) {
469 DRM_ERROR("Failed to get const ib !\n");
470 return r;
471 }
472 parser->const_ib.is_const_ib = true;
473 parser->const_ib.length_dw = ib_chunk->length_dw;
474 if (DRM_COPY_FROM_USER(parser->const_ib.ptr,
475 ib_chunk->user_ptr,
476 ib_chunk->length_dw * 4))
477 return -EFAULT;
478 }
479
480 ib_chunk = &parser->chunks[parser->chunk_ib_idx];
481 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
482 DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
483 return -EINVAL;
484 }
485 }
486 ib_chunk = &parser->chunks[parser->chunk_ib_idx];
487
488 r = radeon_ib_get(rdev, parser->ring, &parser->ib,
489 vm, ib_chunk->length_dw * 4);
490 if (r) {
491 DRM_ERROR("Failed to get ib !\n");
492 return r;
493 }
494 parser->ib.length_dw = ib_chunk->length_dw;
495 if (ib_chunk->kdata)
496 memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
497 else if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
498 return -EFAULT;
499 return 0;
500}
501
530int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 502int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
531{ 503{
532 struct radeon_device *rdev = dev->dev_private; 504 struct radeon_device *rdev = dev->dev_private;
@@ -552,10 +524,15 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
552 r = radeon_cs_handle_lockup(rdev, r); 524 r = radeon_cs_handle_lockup(rdev, r);
553 return r; 525 return r;
554 } 526 }
555 r = radeon_cs_parser_relocs(&parser); 527
556 if (r) { 528 r = radeon_cs_ib_fill(rdev, &parser);
557 if (r != -ERESTARTSYS) 529 if (!r) {
530 r = radeon_cs_parser_relocs(&parser);
531 if (r && r != -ERESTARTSYS)
558 DRM_ERROR("Failed to parse relocation %d!\n", r); 532 DRM_ERROR("Failed to parse relocation %d!\n", r);
533 }
534
535 if (r) {
559 radeon_cs_parser_fini(&parser, r, false); 536 radeon_cs_parser_fini(&parser, r, false);
560 up_read(&rdev->exclusive_lock); 537 up_read(&rdev->exclusive_lock);
561 r = radeon_cs_handle_lockup(rdev, r); 538 r = radeon_cs_handle_lockup(rdev, r);
@@ -579,97 +556,6 @@ out:
579 return r; 556 return r;
580} 557}
581 558
582int radeon_cs_finish_pages(struct radeon_cs_parser *p)
583{
584 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
585 int i;
586 int size = PAGE_SIZE;
587
588 for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
589 if (i == ibc->last_page_index) {
590 size = (ibc->length_dw * 4) % PAGE_SIZE;
591 if (size == 0)
592 size = PAGE_SIZE;
593 }
594
595 if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
596 ibc->user_ptr + (i * PAGE_SIZE),
597 size))
598 return -EFAULT;
599 }
600 return 0;
601}
602
603static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
604{
605 int new_page;
606 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
607 int i;
608 int size = PAGE_SIZE;
609 bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
610 false : true;
611
612 for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
613 if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
614 ibc->user_ptr + (i * PAGE_SIZE),
615 PAGE_SIZE)) {
616 p->parser_error = -EFAULT;
617 return 0;
618 }
619 }
620
621 if (pg_idx == ibc->last_page_index) {
622 size = (ibc->length_dw * 4) % PAGE_SIZE;
623 if (size == 0)
624 size = PAGE_SIZE;
625 }
626
627 new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
628 if (copy1)
629 ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
630
631 if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
632 ibc->user_ptr + (pg_idx * PAGE_SIZE),
633 size)) {
634 p->parser_error = -EFAULT;
635 return 0;
636 }
637
638 /* copy to IB for non single case */
639 if (!copy1)
640 memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
641
642 ibc->last_copied_page = pg_idx;
643 ibc->kpage_idx[new_page] = pg_idx;
644
645 return new_page;
646}
647
648u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
649{
650 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
651 u32 pg_idx, pg_offset;
652 u32 idx_value = 0;
653 int new_page;
654
655 pg_idx = (idx * 4) / PAGE_SIZE;
656 pg_offset = (idx * 4) % PAGE_SIZE;
657
658 if (ibc->kpage_idx[0] == pg_idx)
659 return ibc->kpage[0][pg_offset/4];
660 if (ibc->kpage_idx[1] == pg_idx)
661 return ibc->kpage[1][pg_offset/4];
662
663 new_page = radeon_cs_update_pages(p, pg_idx);
664 if (new_page < 0) {
665 p->parser_error = new_page;
666 return 0;
667 }
668
669 idx_value = ibc->kpage[new_page][pg_offset/4];
670 return idx_value;
671}
672
673/** 559/**
674 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet 560 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
675 * @parser: parser structure holding parsing context. 561 * @parser: parser structure holding parsing context.
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 841d0e09be3e..b9234c43f43d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -98,9 +98,16 @@ static const char radeon_family_name[][16] = {
98 "BONAIRE", 98 "BONAIRE",
99 "KAVERI", 99 "KAVERI",
100 "KABINI", 100 "KABINI",
101 "HAWAII",
101 "LAST", 102 "LAST",
102}; 103};
103 104
105#if defined(CONFIG_VGA_SWITCHEROO)
106bool radeon_is_px(void);
107#else
108static inline bool radeon_is_px(void) { return false; }
109#endif
110
104/** 111/**
105 * radeon_program_register_sequence - program an array of registers. 112 * radeon_program_register_sequence - program an array of registers.
106 * 113 *
@@ -1076,7 +1083,10 @@ static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
1076static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) 1083static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1077{ 1084{
1078 struct drm_device *dev = pci_get_drvdata(pdev); 1085 struct drm_device *dev = pci_get_drvdata(pdev);
1079 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 1086
1087 if (radeon_is_px() && state == VGA_SWITCHEROO_OFF)
1088 return;
1089
1080 if (state == VGA_SWITCHEROO_ON) { 1090 if (state == VGA_SWITCHEROO_ON) {
1081 unsigned d3_delay = dev->pdev->d3_delay; 1091 unsigned d3_delay = dev->pdev->d3_delay;
1082 1092
@@ -1087,7 +1097,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
1087 if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev)) 1097 if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
1088 dev->pdev->d3_delay = 20; 1098 dev->pdev->d3_delay = 20;
1089 1099
1090 radeon_resume_kms(dev); 1100 radeon_resume_kms(dev, true, true);
1091 1101
1092 dev->pdev->d3_delay = d3_delay; 1102 dev->pdev->d3_delay = d3_delay;
1093 1103
@@ -1097,7 +1107,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
1097 printk(KERN_INFO "radeon: switched off\n"); 1107 printk(KERN_INFO "radeon: switched off\n");
1098 drm_kms_helper_poll_disable(dev); 1108 drm_kms_helper_poll_disable(dev);
1099 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1109 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1100 radeon_suspend_kms(dev, pmm); 1110 radeon_suspend_kms(dev, true, true);
1101 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 1111 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1102 } 1112 }
1103} 1113}
@@ -1147,6 +1157,7 @@ int radeon_device_init(struct radeon_device *rdev,
1147{ 1157{
1148 int r, i; 1158 int r, i;
1149 int dma_bits; 1159 int dma_bits;
1160 bool runtime = false;
1150 1161
1151 rdev->shutdown = false; 1162 rdev->shutdown = false;
1152 rdev->dev = &pdev->dev; 1163 rdev->dev = &pdev->dev;
@@ -1293,7 +1304,14 @@ int radeon_device_init(struct radeon_device *rdev,
1293 /* this will fail for cards that aren't VGA class devices, just 1304 /* this will fail for cards that aren't VGA class devices, just
1294 * ignore it */ 1305 * ignore it */
1295 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 1306 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
1296 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false); 1307
1308 if (radeon_runtime_pm == 1)
1309 runtime = true;
1310 if ((radeon_runtime_pm == -1) && radeon_is_px())
1311 runtime = true;
1312 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
1313 if (runtime)
1314 vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
1297 1315
1298 r = radeon_init(rdev); 1316 r = radeon_init(rdev);
1299 if (r) 1317 if (r)
@@ -1383,7 +1401,7 @@ void radeon_device_fini(struct radeon_device *rdev)
1383 * Returns 0 for success or an error on failure. 1401 * Returns 0 for success or an error on failure.
1384 * Called at driver suspend. 1402 * Called at driver suspend.
1385 */ 1403 */
1386int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) 1404int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
1387{ 1405{
1388 struct radeon_device *rdev; 1406 struct radeon_device *rdev;
1389 struct drm_crtc *crtc; 1407 struct drm_crtc *crtc;
@@ -1394,9 +1412,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
1394 if (dev == NULL || dev->dev_private == NULL) { 1412 if (dev == NULL || dev->dev_private == NULL) {
1395 return -ENODEV; 1413 return -ENODEV;
1396 } 1414 }
1397 if (state.event == PM_EVENT_PRETHAW) { 1415
1398 return 0;
1399 }
1400 rdev = dev->dev_private; 1416 rdev = dev->dev_private;
1401 1417
1402 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1418 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1455,14 +1471,17 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
1455 radeon_agp_suspend(rdev); 1471 radeon_agp_suspend(rdev);
1456 1472
1457 pci_save_state(dev->pdev); 1473 pci_save_state(dev->pdev);
1458 if (state.event == PM_EVENT_SUSPEND) { 1474 if (suspend) {
1459 /* Shut down the device */ 1475 /* Shut down the device */
1460 pci_disable_device(dev->pdev); 1476 pci_disable_device(dev->pdev);
1461 pci_set_power_state(dev->pdev, PCI_D3hot); 1477 pci_set_power_state(dev->pdev, PCI_D3hot);
1462 } 1478 }
1463 console_lock(); 1479
1464 radeon_fbdev_set_suspend(rdev, 1); 1480 if (fbcon) {
1465 console_unlock(); 1481 console_lock();
1482 radeon_fbdev_set_suspend(rdev, 1);
1483 console_unlock();
1484 }
1466 return 0; 1485 return 0;
1467} 1486}
1468 1487
@@ -1475,7 +1494,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
1475 * Returns 0 for success or an error on failure. 1494 * Returns 0 for success or an error on failure.
1476 * Called at driver resume. 1495 * Called at driver resume.
1477 */ 1496 */
1478int radeon_resume_kms(struct drm_device *dev) 1497int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1479{ 1498{
1480 struct drm_connector *connector; 1499 struct drm_connector *connector;
1481 struct radeon_device *rdev = dev->dev_private; 1500 struct radeon_device *rdev = dev->dev_private;
@@ -1484,12 +1503,17 @@ int radeon_resume_kms(struct drm_device *dev)
1484 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1503 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1485 return 0; 1504 return 0;
1486 1505
1487 console_lock(); 1506 if (fbcon) {
1488 pci_set_power_state(dev->pdev, PCI_D0); 1507 console_lock();
1489 pci_restore_state(dev->pdev); 1508 }
1490 if (pci_enable_device(dev->pdev)) { 1509 if (resume) {
1491 console_unlock(); 1510 pci_set_power_state(dev->pdev, PCI_D0);
1492 return -1; 1511 pci_restore_state(dev->pdev);
1512 if (pci_enable_device(dev->pdev)) {
1513 if (fbcon)
1514 console_unlock();
1515 return -1;
1516 }
1493 } 1517 }
1494 /* resume AGP if in use */ 1518 /* resume AGP if in use */
1495 radeon_agp_resume(rdev); 1519 radeon_agp_resume(rdev);
@@ -1502,9 +1526,11 @@ int radeon_resume_kms(struct drm_device *dev)
1502 radeon_pm_resume(rdev); 1526 radeon_pm_resume(rdev);
1503 radeon_restore_bios_scratch_regs(rdev); 1527 radeon_restore_bios_scratch_regs(rdev);
1504 1528
1505 radeon_fbdev_set_suspend(rdev, 0); 1529 if (fbcon) {
1506 console_unlock(); 1530 radeon_fbdev_set_suspend(rdev, 0);
1507 1531 console_unlock();
1532 }
1533
1508 /* init dig PHYs, disp eng pll */ 1534 /* init dig PHYs, disp eng pll */
1509 if (rdev->is_atom_bios) { 1535 if (rdev->is_atom_bios) {
1510 radeon_atom_encoder_init(rdev); 1536 radeon_atom_encoder_init(rdev);
@@ -1549,6 +1575,14 @@ int radeon_gpu_reset(struct radeon_device *rdev)
1549 int resched; 1575 int resched;
1550 1576
1551 down_write(&rdev->exclusive_lock); 1577 down_write(&rdev->exclusive_lock);
1578
1579 if (!rdev->needs_reset) {
1580 up_write(&rdev->exclusive_lock);
1581 return 0;
1582 }
1583
1584 rdev->needs_reset = false;
1585
1552 radeon_save_bios_scratch_regs(rdev); 1586 radeon_save_bios_scratch_regs(rdev);
1553 /* block TTM */ 1587 /* block TTM */
1554 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 1588 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0d1aa050d41d..7b253815a323 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -30,6 +30,7 @@
30#include "atom.h" 30#include "atom.h"
31#include <asm/div64.h> 31#include <asm/div64.h>
32 32
33#include <linux/pm_runtime.h>
33#include <drm/drm_crtc_helper.h> 34#include <drm/drm_crtc_helper.h>
34#include <drm/drm_edid.h> 35#include <drm/drm_edid.h>
35 36
@@ -306,7 +307,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
306 */ 307 */
307 if (update_pending && 308 if (update_pending &&
308 (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 309 (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
309 &vpos, &hpos)) && 310 &vpos, &hpos, NULL, NULL)) &&
310 ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || 311 ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
311 (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) { 312 (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
312 /* crtc didn't flip in this target vblank interval, 313 /* crtc didn't flip in this target vblank interval,
@@ -494,11 +495,55 @@ unlock_free:
494 return r; 495 return r;
495} 496}
496 497
498static int
499radeon_crtc_set_config(struct drm_mode_set *set)
500{
501 struct drm_device *dev;
502 struct radeon_device *rdev;
503 struct drm_crtc *crtc;
504 bool active = false;
505 int ret;
506
507 if (!set || !set->crtc)
508 return -EINVAL;
509
510 dev = set->crtc->dev;
511
512 ret = pm_runtime_get_sync(dev->dev);
513 if (ret < 0)
514 return ret;
515
516 ret = drm_crtc_helper_set_config(set);
517
518 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
519 if (crtc->enabled)
520 active = true;
521
522 pm_runtime_mark_last_busy(dev->dev);
523
524 rdev = dev->dev_private;
525 /* if we have active crtcs and we don't have a power ref,
526 take the current one */
527 if (active && !rdev->have_disp_power_ref) {
528 rdev->have_disp_power_ref = true;
529 return ret;
530 }
531 /* if we have no active crtcs, then drop the power ref
532 we got before */
533 if (!active && rdev->have_disp_power_ref) {
534 pm_runtime_put_autosuspend(dev->dev);
535 rdev->have_disp_power_ref = false;
536 }
537
538 /* drop the power reference we got coming in here */
539 pm_runtime_put_autosuspend(dev->dev);
540 return ret;
541}
497static const struct drm_crtc_funcs radeon_crtc_funcs = { 542static const struct drm_crtc_funcs radeon_crtc_funcs = {
498 .cursor_set = radeon_crtc_cursor_set, 543 .cursor_set = radeon_crtc_cursor_set,
499 .cursor_move = radeon_crtc_cursor_move, 544 .cursor_move = radeon_crtc_cursor_move,
500 .gamma_set = radeon_crtc_gamma_set, 545 .gamma_set = radeon_crtc_gamma_set,
501 .set_config = drm_crtc_helper_set_config, 546 .set_config = radeon_crtc_set_config,
502 .destroy = radeon_crtc_destroy, 547 .destroy = radeon_crtc_destroy,
503 .page_flip = radeon_crtc_page_flip, 548 .page_flip = radeon_crtc_page_flip,
504}; 549};
@@ -1178,6 +1223,12 @@ static struct drm_prop_enum_list radeon_audio_enum_list[] =
1178 { RADEON_AUDIO_AUTO, "auto" }, 1223 { RADEON_AUDIO_AUTO, "auto" },
1179}; 1224};
1180 1225
1226/* XXX support different dither options? spatial, temporal, both, etc. */
1227static struct drm_prop_enum_list radeon_dither_enum_list[] =
1228{ { RADEON_FMT_DITHER_DISABLE, "off" },
1229 { RADEON_FMT_DITHER_ENABLE, "on" },
1230};
1231
1181static int radeon_modeset_create_props(struct radeon_device *rdev) 1232static int radeon_modeset_create_props(struct radeon_device *rdev)
1182{ 1233{
1183 int sz; 1234 int sz;
@@ -1234,6 +1285,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
1234 "audio", 1285 "audio",
1235 radeon_audio_enum_list, sz); 1286 radeon_audio_enum_list, sz);
1236 1287
1288 sz = ARRAY_SIZE(radeon_dither_enum_list);
1289 rdev->mode_info.dither_property =
1290 drm_property_create_enum(rdev->ddev, 0,
1291 "dither",
1292 radeon_dither_enum_list, sz);
1293
1237 return 0; 1294 return 0;
1238} 1295}
1239 1296
@@ -1539,12 +1596,17 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1539} 1596}
1540 1597
1541/* 1598/*
1542 * Retrieve current video scanout position of crtc on a given gpu. 1599 * Retrieve current video scanout position of crtc on a given gpu, and
1600 * an optional accurate timestamp of when query happened.
1543 * 1601 *
1544 * \param dev Device to query. 1602 * \param dev Device to query.
1545 * \param crtc Crtc to query. 1603 * \param crtc Crtc to query.
1546 * \param *vpos Location where vertical scanout position should be stored. 1604 * \param *vpos Location where vertical scanout position should be stored.
1547 * \param *hpos Location where horizontal scanout position should go. 1605 * \param *hpos Location where horizontal scanout position should go.
1606 * \param *stime Target location for timestamp taken immediately before
1607 * scanout position query. Can be NULL to skip timestamp.
1608 * \param *etime Target location for timestamp taken immediately after
1609 * scanout position query. Can be NULL to skip timestamp.
1548 * 1610 *
1549 * Returns vpos as a positive number while in active scanout area. 1611 * Returns vpos as a positive number while in active scanout area.
1550 * Returns vpos as a negative number inside vblank, counting the number 1612 * Returns vpos as a negative number inside vblank, counting the number
@@ -1560,7 +1622,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1560 * unknown small number of scanlines wrt. real scanout position. 1622 * unknown small number of scanlines wrt. real scanout position.
1561 * 1623 *
1562 */ 1624 */
1563int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos) 1625int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos,
1626 ktime_t *stime, ktime_t *etime)
1564{ 1627{
1565 u32 stat_crtc = 0, vbl = 0, position = 0; 1628 u32 stat_crtc = 0, vbl = 0, position = 0;
1566 int vbl_start, vbl_end, vtotal, ret = 0; 1629 int vbl_start, vbl_end, vtotal, ret = 0;
@@ -1568,6 +1631,12 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int
1568 1631
1569 struct radeon_device *rdev = dev->dev_private; 1632 struct radeon_device *rdev = dev->dev_private;
1570 1633
1634 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
1635
1636 /* Get optional system timestamp before query. */
1637 if (stime)
1638 *stime = ktime_get();
1639
1571 if (ASIC_IS_DCE4(rdev)) { 1640 if (ASIC_IS_DCE4(rdev)) {
1572 if (crtc == 0) { 1641 if (crtc == 0) {
1573 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1642 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
@@ -1650,6 +1719,12 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int
1650 } 1719 }
1651 } 1720 }
1652 1721
1722 /* Get optional system timestamp after query. */
1723 if (etime)
1724 *etime = ktime_get();
1725
1726 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
1727
1653 /* Decode into vertical and horizontal scanout position. */ 1728 /* Decode into vertical and horizontal scanout position. */
1654 *vpos = position & 0x1fff; 1729 *vpos = position & 0x1fff;
1655 *hpos = (position >> 16) & 0x1fff; 1730 *hpos = (position >> 16) & 0x1fff;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b01f231c2f19..1aee32213f66 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -36,8 +36,9 @@
36#include <drm/drm_pciids.h> 36#include <drm/drm_pciids.h>
37#include <linux/console.h> 37#include <linux/console.h>
38#include <linux/module.h> 38#include <linux/module.h>
39 39#include <linux/pm_runtime.h>
40 40#include <linux/vga_switcheroo.h>
41#include "drm_crtc_helper.h"
41/* 42/*
42 * KMS wrapper. 43 * KMS wrapper.
43 * - 2.0.0 - initial interface 44 * - 2.0.0 - initial interface
@@ -87,8 +88,8 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
87 struct drm_file *file_priv); 88 struct drm_file *file_priv);
88void radeon_driver_preclose_kms(struct drm_device *dev, 89void radeon_driver_preclose_kms(struct drm_device *dev,
89 struct drm_file *file_priv); 90 struct drm_file *file_priv);
90int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 91int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
91int radeon_resume_kms(struct drm_device *dev); 92int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
92u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc); 93u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
93int radeon_enable_vblank_kms(struct drm_device *dev, int crtc); 94int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
94void radeon_disable_vblank_kms(struct drm_device *dev, int crtc); 95void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
@@ -106,7 +107,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj,
106void radeon_gem_object_close(struct drm_gem_object *obj, 107void radeon_gem_object_close(struct drm_gem_object *obj,
107 struct drm_file *file_priv); 108 struct drm_file *file_priv);
108extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, 109extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
109 int *vpos, int *hpos); 110 int *vpos, int *hpos, ktime_t *stime,
111 ktime_t *etime);
110extern const struct drm_ioctl_desc radeon_ioctls_kms[]; 112extern const struct drm_ioctl_desc radeon_ioctls_kms[];
111extern int radeon_max_kms_ioctl; 113extern int radeon_max_kms_ioctl;
112int radeon_mmap(struct file *filp, struct vm_area_struct *vma); 114int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -136,9 +138,11 @@ void radeon_debugfs_cleanup(struct drm_minor *minor);
136#if defined(CONFIG_VGA_SWITCHEROO) 138#if defined(CONFIG_VGA_SWITCHEROO)
137void radeon_register_atpx_handler(void); 139void radeon_register_atpx_handler(void);
138void radeon_unregister_atpx_handler(void); 140void radeon_unregister_atpx_handler(void);
141bool radeon_is_px(void);
139#else 142#else
140static inline void radeon_register_atpx_handler(void) {} 143static inline void radeon_register_atpx_handler(void) {}
141static inline void radeon_unregister_atpx_handler(void) {} 144static inline void radeon_unregister_atpx_handler(void) {}
145static inline bool radeon_is_px(void) { return false; }
142#endif 146#endif
143 147
144int radeon_no_wb; 148int radeon_no_wb;
@@ -161,6 +165,7 @@ int radeon_lockup_timeout = 10000;
161int radeon_fastfb = 0; 165int radeon_fastfb = 0;
162int radeon_dpm = -1; 166int radeon_dpm = -1;
163int radeon_aspm = -1; 167int radeon_aspm = -1;
168int radeon_runtime_pm = -1;
164 169
165MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 170MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
166module_param_named(no_wb, radeon_no_wb, int, 0444); 171module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -222,6 +227,9 @@ module_param_named(dpm, radeon_dpm, int, 0444);
222MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)"); 227MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
223module_param_named(aspm, radeon_aspm, int, 0444); 228module_param_named(aspm, radeon_aspm, int, 0444);
224 229
230MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
231module_param_named(runpm, radeon_runtime_pm, int, 0444);
232
225static struct pci_device_id pciidlist[] = { 233static struct pci_device_id pciidlist[] = {
226 radeon_PCI_IDS 234 radeon_PCI_IDS
227}; 235};
@@ -258,6 +266,7 @@ static int radeon_resume(struct drm_device *dev)
258 return 0; 266 return 0;
259} 267}
260 268
269
261static const struct file_operations radeon_driver_old_fops = { 270static const struct file_operations radeon_driver_old_fops = {
262 .owner = THIS_MODULE, 271 .owner = THIS_MODULE,
263 .open = drm_open, 272 .open = drm_open,
@@ -352,25 +361,144 @@ radeon_pci_remove(struct pci_dev *pdev)
352 drm_put_dev(dev); 361 drm_put_dev(dev);
353} 362}
354 363
355static int 364static int radeon_pmops_suspend(struct device *dev)
356radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
357{ 365{
358 struct drm_device *dev = pci_get_drvdata(pdev); 366 struct pci_dev *pdev = to_pci_dev(dev);
359 return radeon_suspend_kms(dev, state); 367 struct drm_device *drm_dev = pci_get_drvdata(pdev);
368 return radeon_suspend_kms(drm_dev, true, true);
360} 369}
361 370
362static int 371static int radeon_pmops_resume(struct device *dev)
363radeon_pci_resume(struct pci_dev *pdev)
364{ 372{
365 struct drm_device *dev = pci_get_drvdata(pdev); 373 struct pci_dev *pdev = to_pci_dev(dev);
366 return radeon_resume_kms(dev); 374 struct drm_device *drm_dev = pci_get_drvdata(pdev);
375 return radeon_resume_kms(drm_dev, true, true);
376}
377
378static int radeon_pmops_freeze(struct device *dev)
379{
380 struct pci_dev *pdev = to_pci_dev(dev);
381 struct drm_device *drm_dev = pci_get_drvdata(pdev);
382 return radeon_suspend_kms(drm_dev, false, true);
383}
384
385static int radeon_pmops_thaw(struct device *dev)
386{
387 struct pci_dev *pdev = to_pci_dev(dev);
388 struct drm_device *drm_dev = pci_get_drvdata(pdev);
389 return radeon_resume_kms(drm_dev, false, true);
390}
391
392static int radeon_pmops_runtime_suspend(struct device *dev)
393{
394 struct pci_dev *pdev = to_pci_dev(dev);
395 struct drm_device *drm_dev = pci_get_drvdata(pdev);
396 int ret;
397
398 if (radeon_runtime_pm == 0)
399 return -EINVAL;
400
401 drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
402 drm_kms_helper_poll_disable(drm_dev);
403 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
404
405 ret = radeon_suspend_kms(drm_dev, false, false);
406 pci_save_state(pdev);
407 pci_disable_device(pdev);
408 pci_set_power_state(pdev, PCI_D3cold);
409 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
410
411 return 0;
412}
413
414static int radeon_pmops_runtime_resume(struct device *dev)
415{
416 struct pci_dev *pdev = to_pci_dev(dev);
417 struct drm_device *drm_dev = pci_get_drvdata(pdev);
418 int ret;
419
420 if (radeon_runtime_pm == 0)
421 return -EINVAL;
422
423 drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
424
425 pci_set_power_state(pdev, PCI_D0);
426 pci_restore_state(pdev);
427 ret = pci_enable_device(pdev);
428 if (ret)
429 return ret;
430 pci_set_master(pdev);
431
432 ret = radeon_resume_kms(drm_dev, false, false);
433 drm_kms_helper_poll_enable(drm_dev);
434 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
435 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
436 return 0;
367} 437}
368 438
439static int radeon_pmops_runtime_idle(struct device *dev)
440{
441 struct pci_dev *pdev = to_pci_dev(dev);
442 struct drm_device *drm_dev = pci_get_drvdata(pdev);
443 struct drm_crtc *crtc;
444
445 if (radeon_runtime_pm == 0)
446 return -EBUSY;
447
448 /* are we PX enabled? */
449 if (radeon_runtime_pm == -1 && !radeon_is_px()) {
450 DRM_DEBUG_DRIVER("failing to power off - not px\n");
451 return -EBUSY;
452 }
453
454 list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
455 if (crtc->enabled) {
456 DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
457 return -EBUSY;
458 }
459 }
460
461 pm_runtime_mark_last_busy(dev);
462 pm_runtime_autosuspend(dev);
463 /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
464 return 1;
465}
466
467long radeon_drm_ioctl(struct file *filp,
468 unsigned int cmd, unsigned long arg)
469{
470 struct drm_file *file_priv = filp->private_data;
471 struct drm_device *dev;
472 long ret;
473 dev = file_priv->minor->dev;
474 ret = pm_runtime_get_sync(dev->dev);
475 if (ret < 0)
476 return ret;
477
478 ret = drm_ioctl(filp, cmd, arg);
479
480 pm_runtime_mark_last_busy(dev->dev);
481 pm_runtime_put_autosuspend(dev->dev);
482 return ret;
483}
484
485static const struct dev_pm_ops radeon_pm_ops = {
486 .suspend = radeon_pmops_suspend,
487 .resume = radeon_pmops_resume,
488 .freeze = radeon_pmops_freeze,
489 .thaw = radeon_pmops_thaw,
490 .poweroff = radeon_pmops_freeze,
491 .restore = radeon_pmops_resume,
492 .runtime_suspend = radeon_pmops_runtime_suspend,
493 .runtime_resume = radeon_pmops_runtime_resume,
494 .runtime_idle = radeon_pmops_runtime_idle,
495};
496
369static const struct file_operations radeon_driver_kms_fops = { 497static const struct file_operations radeon_driver_kms_fops = {
370 .owner = THIS_MODULE, 498 .owner = THIS_MODULE,
371 .open = drm_open, 499 .open = drm_open,
372 .release = drm_release, 500 .release = drm_release,
373 .unlocked_ioctl = drm_ioctl, 501 .unlocked_ioctl = radeon_drm_ioctl,
374 .mmap = radeon_mmap, 502 .mmap = radeon_mmap,
375 .poll = drm_poll, 503 .poll = drm_poll,
376 .read = drm_read, 504 .read = drm_read,
@@ -379,6 +507,15 @@ static const struct file_operations radeon_driver_kms_fops = {
379#endif 507#endif
380}; 508};
381 509
510
511static void
512radeon_pci_shutdown(struct pci_dev *pdev)
513{
514 struct drm_device *dev = pci_get_drvdata(pdev);
515
516 radeon_driver_unload_kms(dev);
517}
518
382static struct drm_driver kms_driver = { 519static struct drm_driver kms_driver = {
383 .driver_features = 520 .driver_features =
384 DRIVER_USE_AGP | 521 DRIVER_USE_AGP |
@@ -391,8 +528,6 @@ static struct drm_driver kms_driver = {
391 .postclose = radeon_driver_postclose_kms, 528 .postclose = radeon_driver_postclose_kms,
392 .lastclose = radeon_driver_lastclose_kms, 529 .lastclose = radeon_driver_lastclose_kms,
393 .unload = radeon_driver_unload_kms, 530 .unload = radeon_driver_unload_kms,
394 .suspend = radeon_suspend_kms,
395 .resume = radeon_resume_kms,
396 .get_vblank_counter = radeon_get_vblank_counter_kms, 531 .get_vblank_counter = radeon_get_vblank_counter_kms,
397 .enable_vblank = radeon_enable_vblank_kms, 532 .enable_vblank = radeon_enable_vblank_kms,
398 .disable_vblank = radeon_disable_vblank_kms, 533 .disable_vblank = radeon_disable_vblank_kms,
@@ -449,8 +584,8 @@ static struct pci_driver radeon_kms_pci_driver = {
449 .id_table = pciidlist, 584 .id_table = pciidlist,
450 .probe = radeon_pci_probe, 585 .probe = radeon_pci_probe,
451 .remove = radeon_pci_remove, 586 .remove = radeon_pci_remove,
452 .suspend = radeon_pci_suspend, 587 .driver.pm = &radeon_pm_ops,
453 .resume = radeon_pci_resume, 588 .shutdown = radeon_pci_shutdown,
454}; 589};
455 590
456static int __init radeon_init(void) 591static int __init radeon_init(void)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index b369d42f7de5..543dcfae7e6f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -113,6 +113,9 @@
113#define DRIVER_MINOR 33 113#define DRIVER_MINOR 33
114#define DRIVER_PATCHLEVEL 0 114#define DRIVER_PATCHLEVEL 0
115 115
116long radeon_drm_ioctl(struct file *filp,
117 unsigned int cmd, unsigned long arg);
118
116/* The rest of the file is DEPRECATED! */ 119/* The rest of the file is DEPRECATED! */
117#ifdef CONFIG_DRM_RADEON_UMS 120#ifdef CONFIG_DRM_RADEON_UMS
118 121
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 3c8289083f9d..614ad549297f 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -96,6 +96,7 @@ enum radeon_family {
96 CHIP_BONAIRE, 96 CHIP_BONAIRE,
97 CHIP_KAVERI, 97 CHIP_KAVERI,
98 CHIP_KABINI, 98 CHIP_KABINI,
99 CHIP_HAWAII,
99 CHIP_LAST, 100 CHIP_LAST,
100}; 101};
101 102
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index ddb8f8e04eb5..281d14c22a47 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -190,10 +190,8 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
190 } 190 }
191 } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq); 191 } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
192 192
193 if (wake) { 193 if (wake)
194 rdev->fence_drv[ring].last_activity = jiffies;
195 wake_up_all(&rdev->fence_queue); 194 wake_up_all(&rdev->fence_queue);
196 }
197} 195}
198 196
199/** 197/**
@@ -212,13 +210,13 @@ static void radeon_fence_destroy(struct kref *kref)
212} 210}
213 211
214/** 212/**
215 * radeon_fence_seq_signaled - check if a fence sequeuce number has signaled 213 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
216 * 214 *
217 * @rdev: radeon device pointer 215 * @rdev: radeon device pointer
218 * @seq: sequence number 216 * @seq: sequence number
219 * @ring: ring index the fence is associated with 217 * @ring: ring index the fence is associated with
220 * 218 *
221 * Check if the last singled fence sequnce number is >= the requested 219 * Check if the last signaled fence sequnce number is >= the requested
222 * sequence number (all asics). 220 * sequence number (all asics).
223 * Returns true if the fence has signaled (current fence value 221 * Returns true if the fence has signaled (current fence value
224 * is >= requested value) or false if it has not (current fence 222 * is >= requested value) or false if it has not (current fence
@@ -263,113 +261,131 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
263} 261}
264 262
265/** 263/**
266 * radeon_fence_wait_seq - wait for a specific sequence number 264 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
267 * 265 *
268 * @rdev: radeon device pointer 266 * @rdev: radeon device pointer
269 * @target_seq: sequence number we want to wait for 267 * @seq: sequence numbers
270 * @ring: ring index the fence is associated with 268 *
269 * Check if the last signaled fence sequnce number is >= the requested
270 * sequence number (all asics).
271 * Returns true if any has signaled (current value is >= requested value)
272 * or false if it has not. Helper function for radeon_fence_wait_seq.
273 */
274static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
275{
276 unsigned i;
277
278 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
279 if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
280 return true;
281 }
282 return false;
283}
284
285/**
286 * radeon_fence_wait_seq - wait for a specific sequence numbers
287 *
288 * @rdev: radeon device pointer
289 * @target_seq: sequence number(s) we want to wait for
271 * @intr: use interruptable sleep 290 * @intr: use interruptable sleep
272 * @lock_ring: whether the ring should be locked or not 291 * @lock_ring: whether the ring should be locked or not
273 * 292 *
274 * Wait for the requested sequence number to be written (all asics). 293 * Wait for the requested sequence number(s) to be written by any ring
294 * (all asics). Sequnce number array is indexed by ring id.
275 * @intr selects whether to use interruptable (true) or non-interruptable 295 * @intr selects whether to use interruptable (true) or non-interruptable
276 * (false) sleep when waiting for the sequence number. Helper function 296 * (false) sleep when waiting for the sequence number. Helper function
277 * for radeon_fence_wait(), et al. 297 * for radeon_fence_wait_*().
278 * Returns 0 if the sequence number has passed, error for all other cases. 298 * Returns 0 if the sequence number has passed, error for all other cases.
279 * -EDEADLK is returned when a GPU lockup has been detected and the ring is 299 * -EDEADLK is returned when a GPU lockup has been detected.
280 * marked as not ready so no further jobs get scheduled until a successful
281 * reset.
282 */ 300 */
283static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq, 301static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
284 unsigned ring, bool intr, bool lock_ring) 302 bool intr, bool lock_ring)
285{ 303{
286 unsigned long timeout, last_activity; 304 uint64_t last_seq[RADEON_NUM_RINGS];
287 uint64_t seq;
288 unsigned i;
289 bool signaled; 305 bool signaled;
290 int r; 306 int i, r;
307
308 while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
309
310 /* Save current sequence values, used to check for GPU lockups */
311 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
312 if (!target_seq[i])
313 continue;
291 314
292 while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) { 315 last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
293 if (!rdev->ring[ring].ready) { 316 trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]);
294 return -EBUSY; 317 radeon_irq_kms_sw_irq_get(rdev, i);
295 } 318 }
296 319
297 timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT; 320 if (intr) {
298 if (time_after(rdev->fence_drv[ring].last_activity, timeout)) { 321 r = wait_event_interruptible_timeout(rdev->fence_queue, (
299 /* the normal case, timeout is somewhere before last_activity */ 322 (signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
300 timeout = rdev->fence_drv[ring].last_activity - timeout; 323 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
301 } else { 324 } else {
302 /* either jiffies wrapped around, or no fence was signaled in the last 500ms 325 r = wait_event_timeout(rdev->fence_queue, (
303 * anyway we will just wait for the minimum amount and then check for a lockup 326 (signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
304 */ 327 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
305 timeout = 1;
306 } 328 }
307 seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
308 /* Save current last activity valuee, used to check for GPU lockups */
309 last_activity = rdev->fence_drv[ring].last_activity;
310 329
311 trace_radeon_fence_wait_begin(rdev->ddev, seq); 330 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
312 radeon_irq_kms_sw_irq_get(rdev, ring); 331 if (!target_seq[i])
313 if (intr) { 332 continue;
314 r = wait_event_interruptible_timeout(rdev->fence_queue, 333
315 (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)), 334 radeon_irq_kms_sw_irq_put(rdev, i);
316 timeout); 335 trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]);
317 } else {
318 r = wait_event_timeout(rdev->fence_queue,
319 (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
320 timeout);
321 } 336 }
322 radeon_irq_kms_sw_irq_put(rdev, ring); 337
323 if (unlikely(r < 0)) { 338 if (unlikely(r < 0))
324 return r; 339 return r;
325 }
326 trace_radeon_fence_wait_end(rdev->ddev, seq);
327 340
328 if (unlikely(!signaled)) { 341 if (unlikely(!signaled)) {
342 if (rdev->needs_reset)
343 return -EDEADLK;
344
329 /* we were interrupted for some reason and fence 345 /* we were interrupted for some reason and fence
330 * isn't signaled yet, resume waiting */ 346 * isn't signaled yet, resume waiting */
331 if (r) { 347 if (r)
332 continue; 348 continue;
349
350 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
351 if (!target_seq[i])
352 continue;
353
354 if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
355 break;
333 } 356 }
334 357
335 /* check if sequence value has changed since last_activity */ 358 if (i != RADEON_NUM_RINGS)
336 if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
337 continue; 359 continue;
338 }
339 360
340 if (lock_ring) { 361 if (lock_ring)
341 mutex_lock(&rdev->ring_lock); 362 mutex_lock(&rdev->ring_lock);
342 }
343 363
344 /* test if somebody else has already decided that this is a lockup */ 364 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
345 if (last_activity != rdev->fence_drv[ring].last_activity) { 365 if (!target_seq[i])
346 if (lock_ring) { 366 continue;
347 mutex_unlock(&rdev->ring_lock); 367
348 } 368 if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
349 continue; 369 break;
350 } 370 }
351 371
352 if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) { 372 if (i < RADEON_NUM_RINGS) {
353 /* good news we believe it's a lockup */ 373 /* good news we believe it's a lockup */
354 dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n", 374 dev_warn(rdev->dev, "GPU lockup (waiting for "
355 target_seq, seq); 375 "0x%016llx last fence id 0x%016llx on"
356 376 " ring %d)\n",
357 /* change last activity so nobody else think there is a lockup */ 377 target_seq[i], last_seq[i], i);
358 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 378
359 rdev->fence_drv[i].last_activity = jiffies; 379 /* remember that we need an reset */
360 } 380 rdev->needs_reset = true;
361 381 if (lock_ring)
362 /* mark the ring as not ready any more */
363 rdev->ring[ring].ready = false;
364 if (lock_ring) {
365 mutex_unlock(&rdev->ring_lock); 382 mutex_unlock(&rdev->ring_lock);
366 } 383 wake_up_all(&rdev->fence_queue);
367 return -EDEADLK; 384 return -EDEADLK;
368 } 385 }
369 386
370 if (lock_ring) { 387 if (lock_ring)
371 mutex_unlock(&rdev->ring_lock); 388 mutex_unlock(&rdev->ring_lock);
372 }
373 } 389 }
374 } 390 }
375 return 0; 391 return 0;
@@ -388,6 +404,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
388 */ 404 */
389int radeon_fence_wait(struct radeon_fence *fence, bool intr) 405int radeon_fence_wait(struct radeon_fence *fence, bool intr)
390{ 406{
407 uint64_t seq[RADEON_NUM_RINGS] = {};
391 int r; 408 int r;
392 409
393 if (fence == NULL) { 410 if (fence == NULL) {
@@ -395,147 +412,15 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
395 return -EINVAL; 412 return -EINVAL;
396 } 413 }
397 414
398 r = radeon_fence_wait_seq(fence->rdev, fence->seq, 415 seq[fence->ring] = fence->seq;
399 fence->ring, intr, true); 416 if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
400 if (r) { 417 return 0;
401 return r;
402 }
403 fence->seq = RADEON_FENCE_SIGNALED_SEQ;
404 return 0;
405}
406
407static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
408{
409 unsigned i;
410
411 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
412 if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
413 return true;
414 }
415 }
416 return false;
417}
418 418
419/** 419 r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
420 * radeon_fence_wait_any_seq - wait for a sequence number on any ring 420 if (r)
421 * 421 return r;
422 * @rdev: radeon device pointer
423 * @target_seq: sequence number(s) we want to wait for
424 * @intr: use interruptable sleep
425 *
426 * Wait for the requested sequence number(s) to be written by any ring
427 * (all asics). Sequnce number array is indexed by ring id.
428 * @intr selects whether to use interruptable (true) or non-interruptable
429 * (false) sleep when waiting for the sequence number. Helper function
430 * for radeon_fence_wait_any(), et al.
431 * Returns 0 if the sequence number has passed, error for all other cases.
432 */
433static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
434 u64 *target_seq, bool intr)
435{
436 unsigned long timeout, last_activity, tmp;
437 unsigned i, ring = RADEON_NUM_RINGS;
438 bool signaled;
439 int r;
440
441 for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
442 if (!target_seq[i]) {
443 continue;
444 }
445
446 /* use the most recent one as indicator */
447 if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
448 last_activity = rdev->fence_drv[i].last_activity;
449 }
450
451 /* For lockup detection just pick the lowest ring we are
452 * actively waiting for
453 */
454 if (i < ring) {
455 ring = i;
456 }
457 }
458
459 /* nothing to wait for ? */
460 if (ring == RADEON_NUM_RINGS) {
461 return -ENOENT;
462 }
463
464 while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
465 timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
466 if (time_after(last_activity, timeout)) {
467 /* the normal case, timeout is somewhere before last_activity */
468 timeout = last_activity - timeout;
469 } else {
470 /* either jiffies wrapped around, or no fence was signaled in the last 500ms
471 * anyway we will just wait for the minimum amount and then check for a lockup
472 */
473 timeout = 1;
474 }
475
476 trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
477 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
478 if (target_seq[i]) {
479 radeon_irq_kms_sw_irq_get(rdev, i);
480 }
481 }
482 if (intr) {
483 r = wait_event_interruptible_timeout(rdev->fence_queue,
484 (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
485 timeout);
486 } else {
487 r = wait_event_timeout(rdev->fence_queue,
488 (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
489 timeout);
490 }
491 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
492 if (target_seq[i]) {
493 radeon_irq_kms_sw_irq_put(rdev, i);
494 }
495 }
496 if (unlikely(r < 0)) {
497 return r;
498 }
499 trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
500
501 if (unlikely(!signaled)) {
502 /* we were interrupted for some reason and fence
503 * isn't signaled yet, resume waiting */
504 if (r) {
505 continue;
506 }
507
508 mutex_lock(&rdev->ring_lock);
509 for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
510 if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
511 tmp = rdev->fence_drv[i].last_activity;
512 }
513 }
514 /* test if somebody else has already decided that this is a lockup */
515 if (last_activity != tmp) {
516 last_activity = tmp;
517 mutex_unlock(&rdev->ring_lock);
518 continue;
519 }
520
521 if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
522 /* good news we believe it's a lockup */
523 dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
524 target_seq[ring]);
525
526 /* change last activity so nobody else think there is a lockup */
527 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
528 rdev->fence_drv[i].last_activity = jiffies;
529 }
530 422
531 /* mark the ring as not ready any more */ 423 fence->seq = RADEON_FENCE_SIGNALED_SEQ;
532 rdev->ring[ring].ready = false;
533 mutex_unlock(&rdev->ring_lock);
534 return -EDEADLK;
535 }
536 mutex_unlock(&rdev->ring_lock);
537 }
538 }
539 return 0; 424 return 0;
540} 425}
541 426
@@ -557,7 +442,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
557 bool intr) 442 bool intr)
558{ 443{
559 uint64_t seq[RADEON_NUM_RINGS]; 444 uint64_t seq[RADEON_NUM_RINGS];
560 unsigned i; 445 unsigned i, num_rings = 0;
561 int r; 446 int r;
562 447
563 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 448 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -567,15 +452,19 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
567 continue; 452 continue;
568 } 453 }
569 454
570 if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
571 /* something was allready signaled */
572 return 0;
573 }
574
575 seq[i] = fences[i]->seq; 455 seq[i] = fences[i]->seq;
456 ++num_rings;
457
458 /* test if something was allready signaled */
459 if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
460 return 0;
576 } 461 }
577 462
578 r = radeon_fence_wait_any_seq(rdev, seq, intr); 463 /* nothing to wait for ? */
464 if (num_rings == 0)
465 return -ENOENT;
466
467 r = radeon_fence_wait_seq(rdev, seq, intr, true);
579 if (r) { 468 if (r) {
580 return r; 469 return r;
581 } 470 }
@@ -594,15 +483,15 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
594 */ 483 */
595int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) 484int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
596{ 485{
597 uint64_t seq; 486 uint64_t seq[RADEON_NUM_RINGS] = {};
598 487
599 seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL; 488 seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
600 if (seq >= rdev->fence_drv[ring].sync_seq[ring]) { 489 if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
601 /* nothing to wait for, last_seq is 490 /* nothing to wait for, last_seq is
602 already the last emited fence */ 491 already the last emited fence */
603 return -ENOENT; 492 return -ENOENT;
604 } 493 }
605 return radeon_fence_wait_seq(rdev, seq, ring, false, false); 494 return radeon_fence_wait_seq(rdev, seq, false, false);
606} 495}
607 496
608/** 497/**
@@ -617,14 +506,18 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
617 */ 506 */
618int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) 507int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
619{ 508{
620 uint64_t seq = rdev->fence_drv[ring].sync_seq[ring]; 509 uint64_t seq[RADEON_NUM_RINGS] = {};
621 int r; 510 int r;
622 511
623 r = radeon_fence_wait_seq(rdev, seq, ring, false, false); 512 seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
513 if (!seq[ring])
514 return 0;
515
516 r = radeon_fence_wait_seq(rdev, seq, false, false);
624 if (r) { 517 if (r) {
625 if (r == -EDEADLK) { 518 if (r == -EDEADLK)
626 return -EDEADLK; 519 return -EDEADLK;
627 } 520
628 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n", 521 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
629 ring, r); 522 ring, r);
630 } 523 }
@@ -826,7 +719,6 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
826 for (i = 0; i < RADEON_NUM_RINGS; ++i) 719 for (i = 0; i < RADEON_NUM_RINGS; ++i)
827 rdev->fence_drv[ring].sync_seq[i] = 0; 720 rdev->fence_drv[ring].sync_seq[i] = 0;
828 atomic64_set(&rdev->fence_drv[ring].last_seq, 0); 721 atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
829 rdev->fence_drv[ring].last_activity = jiffies;
830 rdev->fence_drv[ring].initialized = false; 722 rdev->fence_drv[ring].initialized = false;
831} 723}
832 724
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index b990b1a2bd50..8a83b89d4709 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -607,8 +607,8 @@ static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
607 */ 607 */
608int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm) 608int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
609{ 609{
610 unsigned pd_size, pts_size; 610 unsigned pd_size, pd_entries, pts_size;
611 u64 *pd_addr; 611 struct radeon_ib ib;
612 int r; 612 int r;
613 613
614 if (vm == NULL) { 614 if (vm == NULL) {
@@ -619,8 +619,10 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
619 return 0; 619 return 0;
620 } 620 }
621 621
622retry:
623 pd_size = radeon_vm_directory_size(rdev); 622 pd_size = radeon_vm_directory_size(rdev);
623 pd_entries = radeon_vm_num_pdes(rdev);
624
625retry:
624 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, 626 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
625 &vm->page_directory, pd_size, 627 &vm->page_directory, pd_size,
626 RADEON_VM_PTB_ALIGN_SIZE, false); 628 RADEON_VM_PTB_ALIGN_SIZE, false);
@@ -637,9 +639,31 @@ retry:
637 vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory); 639 vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
638 640
639 /* Initially clear the page directory */ 641 /* Initially clear the page directory */
640 pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory); 642 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
641 memset(pd_addr, 0, pd_size); 643 NULL, pd_entries * 2 + 64);
644 if (r) {
645 radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
646 return r;
647 }
648
649 ib.length_dw = 0;
650
651 radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
652 0, pd_entries, 0, 0);
653
654 radeon_ib_sync_to(&ib, vm->fence);
655 r = radeon_ib_schedule(rdev, &ib, NULL);
656 if (r) {
657 radeon_ib_free(rdev, &ib);
658 radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
659 return r;
660 }
661 radeon_fence_unref(&vm->fence);
662 vm->fence = radeon_fence_ref(ib.fence);
663 radeon_ib_free(rdev, &ib);
664 radeon_fence_unref(&vm->last_flush);
642 665
666 /* allocate page table array */
643 pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *); 667 pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
644 vm->page_tables = kzalloc(pts_size, GFP_KERNEL); 668 vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
645 669
@@ -914,6 +938,26 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
914} 938}
915 939
916/** 940/**
941 * radeon_vm_page_flags - translate page flags to what the hw uses
942 *
943 * @flags: flags comming from userspace
944 *
945 * Translate the flags the userspace ABI uses to hw flags.
946 */
947static uint32_t radeon_vm_page_flags(uint32_t flags)
948{
949 uint32_t hw_flags = 0;
950 hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
951 hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
952 hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
953 if (flags & RADEON_VM_PAGE_SYSTEM) {
954 hw_flags |= R600_PTE_SYSTEM;
955 hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
956 }
957 return hw_flags;
958}
959
960/**
917 * radeon_vm_update_pdes - make sure that page directory is valid 961 * radeon_vm_update_pdes - make sure that page directory is valid
918 * 962 *
919 * @rdev: radeon_device pointer 963 * @rdev: radeon_device pointer
@@ -974,7 +1018,11 @@ retry:
974 if (count) { 1018 if (count) {
975 radeon_asic_vm_set_page(rdev, ib, last_pde, 1019 radeon_asic_vm_set_page(rdev, ib, last_pde,
976 last_pt, count, incr, 1020 last_pt, count, incr,
977 RADEON_VM_PAGE_VALID); 1021 R600_PTE_VALID);
1022
1023 count *= RADEON_VM_PTE_COUNT;
1024 radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
1025 count, 0, 0);
978 } 1026 }
979 1027
980 count = 1; 1028 count = 1;
@@ -987,8 +1035,11 @@ retry:
987 1035
988 if (count) { 1036 if (count) {
989 radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count, 1037 radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
990 incr, RADEON_VM_PAGE_VALID); 1038 incr, R600_PTE_VALID);
991 1039
1040 count *= RADEON_VM_PTE_COUNT;
1041 radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
1042 count, 0, 0);
992 } 1043 }
993 1044
994 return 0; 1045 return 0;
@@ -1082,7 +1133,6 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1082 struct radeon_bo *bo, 1133 struct radeon_bo *bo,
1083 struct ttm_mem_reg *mem) 1134 struct ttm_mem_reg *mem)
1084{ 1135{
1085 unsigned ridx = rdev->asic->vm.pt_ring_index;
1086 struct radeon_ib ib; 1136 struct radeon_ib ib;
1087 struct radeon_bo_va *bo_va; 1137 struct radeon_bo_va *bo_va;
1088 unsigned nptes, npdes, ndw; 1138 unsigned nptes, npdes, ndw;
@@ -1151,11 +1201,14 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1151 /* reserve space for pde addresses */ 1201 /* reserve space for pde addresses */
1152 ndw += npdes * 2; 1202 ndw += npdes * 2;
1153 1203
1204 /* reserve space for clearing new page tables */
1205 ndw += npdes * 2 * RADEON_VM_PTE_COUNT;
1206
1154 /* update too big for an IB */ 1207 /* update too big for an IB */
1155 if (ndw > 0xfffff) 1208 if (ndw > 0xfffff)
1156 return -ENOMEM; 1209 return -ENOMEM;
1157 1210
1158 r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4); 1211 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
1159 ib.length_dw = 0; 1212 ib.length_dw = 0;
1160 1213
1161 r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset); 1214 r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
@@ -1165,7 +1218,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1165 } 1218 }
1166 1219
1167 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, 1220 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
1168 addr, bo_va->flags); 1221 addr, radeon_vm_page_flags(bo_va->flags));
1169 1222
1170 radeon_ib_sync_to(&ib, vm->fence); 1223 radeon_ib_sync_to(&ib, vm->fence);
1171 r = radeon_ib_schedule(rdev, &ib, NULL); 1224 r = radeon_ib_schedule(rdev, &ib, NULL);
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index c180df8e84db..bdb0f93e73bc 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -418,7 +418,7 @@ long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long
418 if (nr < DRM_COMMAND_BASE) 418 if (nr < DRM_COMMAND_BASE)
419 return drm_compat_ioctl(filp, cmd, arg); 419 return drm_compat_ioctl(filp, cmd, arg);
420 420
421 ret = drm_ioctl(filp, cmd, arg); 421 ret = radeon_drm_ioctl(filp, cmd, arg);
422 422
423 return ret; 423 return ret;
424} 424}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index cc9e8482cf30..ec6240b00469 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -32,6 +32,8 @@
32#include "radeon.h" 32#include "radeon.h"
33#include "atom.h" 33#include "atom.h"
34 34
35#include <linux/pm_runtime.h>
36
35#define RADEON_WAIT_IDLE_TIMEOUT 200 37#define RADEON_WAIT_IDLE_TIMEOUT 200
36 38
37/** 39/**
@@ -47,8 +49,12 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
47{ 49{
48 struct drm_device *dev = (struct drm_device *) arg; 50 struct drm_device *dev = (struct drm_device *) arg;
49 struct radeon_device *rdev = dev->dev_private; 51 struct radeon_device *rdev = dev->dev_private;
52 irqreturn_t ret;
50 53
51 return radeon_irq_process(rdev); 54 ret = radeon_irq_process(rdev);
55 if (ret == IRQ_HANDLED)
56 pm_runtime_mark_last_busy(dev->dev);
57 return ret;
52} 58}
53 59
54/* 60/*
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index d6b36766e8c9..bb8710531a1b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -32,7 +32,7 @@
32 32
33#include <linux/vga_switcheroo.h> 33#include <linux/vga_switcheroo.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35 35#include <linux/pm_runtime.h>
36/** 36/**
37 * radeon_driver_unload_kms - Main unload function for KMS. 37 * radeon_driver_unload_kms - Main unload function for KMS.
38 * 38 *
@@ -50,9 +50,14 @@ int radeon_driver_unload_kms(struct drm_device *dev)
50 50
51 if (rdev == NULL) 51 if (rdev == NULL)
52 return 0; 52 return 0;
53
53 if (rdev->rmmio == NULL) 54 if (rdev->rmmio == NULL)
54 goto done_free; 55 goto done_free;
56
57 pm_runtime_get_sync(dev->dev);
58
55 radeon_acpi_fini(rdev); 59 radeon_acpi_fini(rdev);
60
56 radeon_modeset_fini(rdev); 61 radeon_modeset_fini(rdev);
57 radeon_device_fini(rdev); 62 radeon_device_fini(rdev);
58 63
@@ -125,9 +130,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
125 "Error during ACPI methods call\n"); 130 "Error during ACPI methods call\n");
126 } 131 }
127 132
133 if (radeon_runtime_pm != 0) {
134 pm_runtime_use_autosuspend(dev->dev);
135 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
136 pm_runtime_set_active(dev->dev);
137 pm_runtime_allow(dev->dev);
138 pm_runtime_mark_last_busy(dev->dev);
139 pm_runtime_put_autosuspend(dev->dev);
140 }
141
128out: 142out:
129 if (r) 143 if (r)
130 radeon_driver_unload_kms(dev); 144 radeon_driver_unload_kms(dev);
145
146
131 return r; 147 return r;
132} 148}
133 149
@@ -475,9 +491,14 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
475int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) 491int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
476{ 492{
477 struct radeon_device *rdev = dev->dev_private; 493 struct radeon_device *rdev = dev->dev_private;
494 int r;
478 495
479 file_priv->driver_priv = NULL; 496 file_priv->driver_priv = NULL;
480 497
498 r = pm_runtime_get_sync(dev->dev);
499 if (r < 0)
500 return r;
501
481 /* new gpu have virtual address space support */ 502 /* new gpu have virtual address space support */
482 if (rdev->family >= CHIP_CAYMAN) { 503 if (rdev->family >= CHIP_CAYMAN) {
483 struct radeon_fpriv *fpriv; 504 struct radeon_fpriv *fpriv;
@@ -506,6 +527,9 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
506 527
507 file_priv->driver_priv = fpriv; 528 file_priv->driver_priv = fpriv;
508 } 529 }
530
531 pm_runtime_mark_last_busy(dev->dev);
532 pm_runtime_put_autosuspend(dev->dev);
509 return 0; 533 return 0;
510} 534}
511 535
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 7cb178a34a0f..0c7b8c66301b 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1056,6 +1056,26 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
1056 } 1056 }
1057} 1057}
1058 1058
1059static void radeon_crtc_disable(struct drm_crtc *crtc)
1060{
1061 radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1062 if (crtc->fb) {
1063 int r;
1064 struct radeon_framebuffer *radeon_fb;
1065 struct radeon_bo *rbo;
1066
1067 radeon_fb = to_radeon_framebuffer(crtc->fb);
1068 rbo = gem_to_radeon_bo(radeon_fb->obj);
1069 r = radeon_bo_reserve(rbo, false);
1070 if (unlikely(r))
1071 DRM_ERROR("failed to reserve rbo before unpin\n");
1072 else {
1073 radeon_bo_unpin(rbo);
1074 radeon_bo_unreserve(rbo);
1075 }
1076 }
1077}
1078
1059static const struct drm_crtc_helper_funcs legacy_helper_funcs = { 1079static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
1060 .dpms = radeon_crtc_dpms, 1080 .dpms = radeon_crtc_dpms,
1061 .mode_fixup = radeon_crtc_mode_fixup, 1081 .mode_fixup = radeon_crtc_mode_fixup,
@@ -1065,6 +1085,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
1065 .prepare = radeon_crtc_prepare, 1085 .prepare = radeon_crtc_prepare,
1066 .commit = radeon_crtc_commit, 1086 .commit = radeon_crtc_commit,
1067 .load_lut = radeon_crtc_load_lut, 1087 .load_lut = radeon_crtc_load_lut,
1088 .disable = radeon_crtc_disable
1068}; 1089};
1069 1090
1070 1091
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 62cd512f5c8d..c89971d904c3 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -392,7 +392,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
392 props.type = BACKLIGHT_RAW; 392 props.type = BACKLIGHT_RAW;
393 snprintf(bl_name, sizeof(bl_name), 393 snprintf(bl_name, sizeof(bl_name),
394 "radeon_bl%d", dev->primary->index); 394 "radeon_bl%d", dev->primary->index);
395 bd = backlight_device_register(bl_name, &drm_connector->kdev, 395 bd = backlight_device_register(bl_name, drm_connector->kdev,
396 pdata, &radeon_backlight_ops, &props); 396 pdata, &radeon_backlight_ops, &props);
397 if (IS_ERR(bd)) { 397 if (IS_ERR(bd)) {
398 DRM_ERROR("Backlight registration failed\n"); 398 DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ef63d3f00b2f..3f0dd664af90 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -249,6 +249,8 @@ struct radeon_mode_info {
249 struct drm_property *underscan_vborder_property; 249 struct drm_property *underscan_vborder_property;
250 /* audio */ 250 /* audio */
251 struct drm_property *audio_property; 251 struct drm_property *audio_property;
252 /* FMT dithering */
253 struct drm_property *dither_property;
252 /* hardcoded DFP edid from BIOS */ 254 /* hardcoded DFP edid from BIOS */
253 struct edid *bios_hardcoded_edid; 255 struct edid *bios_hardcoded_edid;
254 int bios_hardcoded_edid_size; 256 int bios_hardcoded_edid_size;
@@ -479,6 +481,11 @@ enum radeon_connector_audio {
479 RADEON_AUDIO_AUTO = 2 481 RADEON_AUDIO_AUTO = 2
480}; 482};
481 483
484enum radeon_connector_dither {
485 RADEON_FMT_DITHER_DISABLE = 0,
486 RADEON_FMT_DITHER_ENABLE = 1,
487};
488
482struct radeon_connector { 489struct radeon_connector {
483 struct drm_connector base; 490 struct drm_connector base;
484 uint32_t connector_id; 491 uint32_t connector_id;
@@ -498,6 +505,7 @@ struct radeon_connector {
498 struct radeon_router router; 505 struct radeon_router router;
499 struct radeon_i2c_chan *router_bus; 506 struct radeon_i2c_chan *router_bus;
500 enum radeon_connector_audio audio; 507 enum radeon_connector_audio audio;
508 enum radeon_connector_dither dither;
501}; 509};
502 510
503struct radeon_framebuffer { 511struct radeon_framebuffer {
@@ -758,7 +766,8 @@ extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
758 int x, int y); 766 int x, int y);
759 767
760extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, 768extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
761 int *vpos, int *hpos); 769 int *vpos, int *hpos, ktime_t *stime,
770 ktime_t *etime);
762 771
763extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); 772extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
764extern struct edid * 773extern struct edid *
@@ -850,6 +859,12 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
850 struct drm_display_mode *mode, 859 struct drm_display_mode *mode,
851 struct drm_display_mode *adjusted_mode); 860 struct drm_display_mode *adjusted_mode);
852 861
862/* fmt blocks */
863void avivo_program_fmt(struct drm_encoder *encoder);
864void dce3_program_fmt(struct drm_encoder *encoder);
865void dce4_program_fmt(struct drm_encoder *encoder);
866void dce8_program_fmt(struct drm_encoder *encoder);
867
853/* fbdev layer */ 868/* fbdev layer */
854int radeon_fbdev_init(struct radeon_device *rdev); 869int radeon_fbdev_init(struct radeon_device *rdev);
855void radeon_fbdev_fini(struct radeon_device *rdev); 870void radeon_fbdev_fini(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4f6b7fc7ad3c..866ace070b91 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -508,17 +508,21 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
508 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 508 } else if (strncmp("auto", buf, strlen("auto")) == 0) {
509 level = RADEON_DPM_FORCED_LEVEL_AUTO; 509 level = RADEON_DPM_FORCED_LEVEL_AUTO;
510 } else { 510 } else {
511 mutex_unlock(&rdev->pm.mutex);
512 count = -EINVAL; 511 count = -EINVAL;
513 goto fail; 512 goto fail;
514 } 513 }
515 if (rdev->asic->dpm.force_performance_level) { 514 if (rdev->asic->dpm.force_performance_level) {
515 if (rdev->pm.dpm.thermal_active) {
516 count = -EINVAL;
517 goto fail;
518 }
516 ret = radeon_dpm_force_performance_level(rdev, level); 519 ret = radeon_dpm_force_performance_level(rdev, level);
517 if (ret) 520 if (ret)
518 count = -EINVAL; 521 count = -EINVAL;
519 } 522 }
520 mutex_unlock(&rdev->pm.mutex);
521fail: 523fail:
524 mutex_unlock(&rdev->pm.mutex);
525
522 return count; 526 return count;
523} 527}
524 528
@@ -881,11 +885,12 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
881 } 885 }
882 } 886 }
883 887
884 printk("switching from power state:\n"); 888 if (radeon_dpm == 1) {
885 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps); 889 printk("switching from power state:\n");
886 printk("switching to power state:\n"); 890 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
887 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps); 891 printk("switching to power state:\n");
888 892 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
893 }
889 mutex_lock(&rdev->ddev->struct_mutex); 894 mutex_lock(&rdev->ddev->struct_mutex);
890 down_write(&rdev->pm.mclk_lock); 895 down_write(&rdev->pm.mclk_lock);
891 mutex_lock(&rdev->ring_lock); 896 mutex_lock(&rdev->ring_lock);
@@ -918,12 +923,16 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
918 radeon_dpm_post_set_power_state(rdev); 923 radeon_dpm_post_set_power_state(rdev);
919 924
920 if (rdev->asic->dpm.force_performance_level) { 925 if (rdev->asic->dpm.force_performance_level) {
921 if (rdev->pm.dpm.thermal_active) 926 if (rdev->pm.dpm.thermal_active) {
927 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
922 /* force low perf level for thermal */ 928 /* force low perf level for thermal */
923 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW); 929 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
924 else 930 /* save the user's level */
925 /* otherwise, enable auto */ 931 rdev->pm.dpm.forced_level = level;
926 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); 932 } else {
933 /* otherwise, user selected level */
934 radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
935 }
927 } 936 }
928 937
929done: 938done:
@@ -1179,7 +1188,8 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
1179 mutex_lock(&rdev->pm.mutex); 1188 mutex_lock(&rdev->pm.mutex);
1180 radeon_dpm_init(rdev); 1189 radeon_dpm_init(rdev);
1181 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 1190 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1182 radeon_dpm_print_power_states(rdev); 1191 if (radeon_dpm == 1)
1192 radeon_dpm_print_power_states(rdev);
1183 radeon_dpm_setup_asic(rdev); 1193 radeon_dpm_setup_asic(rdev);
1184 ret = radeon_dpm_enable(rdev); 1194 ret = radeon_dpm_enable(rdev);
1185 mutex_unlock(&rdev->pm.mutex); 1195 mutex_unlock(&rdev->pm.mutex);
@@ -1241,6 +1251,24 @@ int radeon_pm_init(struct radeon_device *rdev)
1241 case CHIP_RV670: 1251 case CHIP_RV670:
1242 case CHIP_RS780: 1252 case CHIP_RS780:
1243 case CHIP_RS880: 1253 case CHIP_RS880:
1254 case CHIP_CAYMAN:
1255 case CHIP_ARUBA:
1256 case CHIP_BONAIRE:
1257 case CHIP_KABINI:
1258 case CHIP_KAVERI:
1259 case CHIP_HAWAII:
1260 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1261 if (!rdev->rlc_fw)
1262 rdev->pm.pm_method = PM_METHOD_PROFILE;
1263 else if ((rdev->family >= CHIP_RV770) &&
1264 (!(rdev->flags & RADEON_IS_IGP)) &&
1265 (!rdev->smc_fw))
1266 rdev->pm.pm_method = PM_METHOD_PROFILE;
1267 else if (radeon_dpm == 1)
1268 rdev->pm.pm_method = PM_METHOD_DPM;
1269 else
1270 rdev->pm.pm_method = PM_METHOD_PROFILE;
1271 break;
1244 case CHIP_RV770: 1272 case CHIP_RV770:
1245 case CHIP_RV730: 1273 case CHIP_RV730:
1246 case CHIP_RV710: 1274 case CHIP_RV710:
@@ -1256,16 +1284,11 @@ int radeon_pm_init(struct radeon_device *rdev)
1256 case CHIP_BARTS: 1284 case CHIP_BARTS:
1257 case CHIP_TURKS: 1285 case CHIP_TURKS:
1258 case CHIP_CAICOS: 1286 case CHIP_CAICOS:
1259 case CHIP_CAYMAN:
1260 case CHIP_ARUBA:
1261 case CHIP_TAHITI: 1287 case CHIP_TAHITI:
1262 case CHIP_PITCAIRN: 1288 case CHIP_PITCAIRN:
1263 case CHIP_VERDE: 1289 case CHIP_VERDE:
1264 case CHIP_OLAND: 1290 case CHIP_OLAND:
1265 case CHIP_HAINAN: 1291 case CHIP_HAINAN:
1266 case CHIP_BONAIRE:
1267 case CHIP_KABINI:
1268 case CHIP_KAVERI:
1269 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1292 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1270 if (!rdev->rlc_fw) 1293 if (!rdev->rlc_fw)
1271 rdev->pm.pm_method = PM_METHOD_PROFILE; 1294 rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1273,10 +1296,10 @@ int radeon_pm_init(struct radeon_device *rdev)
1273 (!(rdev->flags & RADEON_IS_IGP)) && 1296 (!(rdev->flags & RADEON_IS_IGP)) &&
1274 (!rdev->smc_fw)) 1297 (!rdev->smc_fw))
1275 rdev->pm.pm_method = PM_METHOD_PROFILE; 1298 rdev->pm.pm_method = PM_METHOD_PROFILE;
1276 else if (radeon_dpm == 1) 1299 else if (radeon_dpm == 0)
1277 rdev->pm.pm_method = PM_METHOD_DPM;
1278 else
1279 rdev->pm.pm_method = PM_METHOD_PROFILE; 1300 rdev->pm.pm_method = PM_METHOD_PROFILE;
1301 else
1302 rdev->pm.pm_method = PM_METHOD_DPM;
1280 break; 1303 break;
1281 default: 1304 default:
1282 /* default to profile method */ 1305 /* default to profile method */
@@ -1468,7 +1491,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1468 */ 1491 */
1469 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { 1492 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1470 if (rdev->pm.active_crtcs & (1 << crtc)) { 1493 if (rdev->pm.active_crtcs & (1 << crtc)) {
1471 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos); 1494 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos, NULL, NULL);
1472 if ((vbl_status & DRM_SCANOUTPOS_VALID) && 1495 if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
1473 !(vbl_status & DRM_SCANOUTPOS_INVBL)) 1496 !(vbl_status & DRM_SCANOUTPOS_INVBL))
1474 in_vbl = false; 1497 in_vbl = false;
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index f7e367815964..811bca691b36 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -47,6 +47,30 @@ TRACE_EVENT(radeon_cs,
47 __entry->fences) 47 __entry->fences)
48); 48);
49 49
50TRACE_EVENT(radeon_vm_set_page,
51 TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
52 uint32_t incr, uint32_t flags),
53 TP_ARGS(pe, addr, count, incr, flags),
54 TP_STRUCT__entry(
55 __field(u64, pe)
56 __field(u64, addr)
57 __field(u32, count)
58 __field(u32, incr)
59 __field(u32, flags)
60 ),
61
62 TP_fast_assign(
63 __entry->pe = pe;
64 __entry->addr = addr;
65 __entry->count = count;
66 __entry->incr = incr;
67 __entry->flags = flags;
68 ),
69 TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
70 __entry->pe, __entry->addr, __entry->incr,
71 __entry->flags, __entry->count)
72);
73
50DECLARE_EVENT_CLASS(radeon_fence_request, 74DECLARE_EVENT_CLASS(radeon_fence_request,
51 75
52 TP_PROTO(struct drm_device *dev, u32 seqno), 76 TP_PROTO(struct drm_device *dev, u32 seqno),
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index 33858364fe89..a77cd274dfc3 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -59,6 +59,7 @@
59#define SI_MC_UCODE_SIZE 7769 59#define SI_MC_UCODE_SIZE 7769
60#define OLAND_MC_UCODE_SIZE 7863 60#define OLAND_MC_UCODE_SIZE 7863
61#define CIK_MC_UCODE_SIZE 7866 61#define CIK_MC_UCODE_SIZE 7866
62#define HAWAII_MC_UCODE_SIZE 7933
62 63
63/* SDMA */ 64/* SDMA */
64#define CIK_SDMA_UCODE_SIZE 1050 65#define CIK_SDMA_UCODE_SIZE 1050
@@ -143,4 +144,7 @@
143#define BONAIRE_SMC_UCODE_START 0x20000 144#define BONAIRE_SMC_UCODE_START 0x20000
144#define BONAIRE_SMC_UCODE_SIZE 0x1FDEC 145#define BONAIRE_SMC_UCODE_SIZE 0x1FDEC
145 146
147#define HAWAII_SMC_UCODE_START 0x20000
148#define HAWAII_SMC_UCODE_SIZE 0x1FDEC
149
146#endif 150#endif
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 308eff5be1b4..373d088bac66 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -97,6 +97,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
97 case CHIP_BONAIRE: 97 case CHIP_BONAIRE:
98 case CHIP_KABINI: 98 case CHIP_KABINI:
99 case CHIP_KAVERI: 99 case CHIP_KAVERI:
100 case CHIP_HAWAII:
100 fw_name = FIRMWARE_BONAIRE; 101 fw_name = FIRMWARE_BONAIRE;
101 break; 102 break;
102 103
@@ -240,6 +241,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
240 if (handle != 0 && rdev->uvd.filp[i] == filp) { 241 if (handle != 0 && rdev->uvd.filp[i] == filp) {
241 struct radeon_fence *fence; 242 struct radeon_fence *fence;
242 243
244 radeon_uvd_note_usage(rdev);
245
243 r = radeon_uvd_get_destroy_msg(rdev, 246 r = radeon_uvd_get_destroy_msg(rdev,
244 R600_RING_TYPE_UVD_INDEX, handle, &fence); 247 R600_RING_TYPE_UVD_INDEX, handle, &fence);
245 if (r) { 248 if (r) {
@@ -620,7 +623,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
620 if (r) 623 if (r)
621 goto err; 624 goto err;
622 625
623 r = radeon_ib_get(rdev, ring, &ib, NULL, 16); 626 r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
624 if (r) 627 if (r)
625 goto err; 628 goto err;
626 629
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6acba8017b9a..76cc8d3aafec 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -153,6 +153,70 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
153 return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; 153 return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
154} 154}
155 155
156void avivo_program_fmt(struct drm_encoder *encoder)
157{
158 struct drm_device *dev = encoder->dev;
159 struct radeon_device *rdev = dev->dev_private;
160 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
161 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
162 int bpc = 0;
163 u32 tmp = 0;
164 enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
165
166 if (connector) {
167 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
168 bpc = radeon_get_monitor_bpc(connector);
169 dither = radeon_connector->dither;
170 }
171
172 /* LVDS FMT is set up by atom */
173 if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
174 return;
175
176 if (bpc == 0)
177 return;
178
179 switch (bpc) {
180 case 6:
181 if (dither == RADEON_FMT_DITHER_ENABLE)
182 /* XXX sort out optimal dither settings */
183 tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
184 else
185 tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN;
186 break;
187 case 8:
188 if (dither == RADEON_FMT_DITHER_ENABLE)
189 /* XXX sort out optimal dither settings */
190 tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN |
191 AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH);
192 else
193 tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN |
194 AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH);
195 break;
196 case 10:
197 default:
198 /* not needed */
199 break;
200 }
201
202 switch (radeon_encoder->encoder_id) {
203 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
204 WREG32(AVIVO_TMDSA_BIT_DEPTH_CONTROL, tmp);
205 break;
206 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
207 WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, tmp);
208 break;
209 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
210 WREG32(AVIVO_DVOA_BIT_DEPTH_CONTROL, tmp);
211 break;
212 case ENCODER_OBJECT_ID_INTERNAL_DDI:
213 WREG32(AVIVO_DDIA_BIT_DEPTH_CONTROL, tmp);
214 break;
215 default:
216 break;
217 }
218}
219
156void rs600_pm_misc(struct radeon_device *rdev) 220void rs600_pm_misc(struct radeon_device *rdev)
157{ 221{
158 int requested_index = rdev->pm.requested_power_state_index; 222 int requested_index = rdev->pm.requested_power_state_index;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1447d794c22a..1c560629575a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -345,9 +345,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
345 if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full && 345 if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
346 rdev->pm.sideport_bandwidth.full) 346 rdev->pm.sideport_bandwidth.full)
347 max_bandwidth = rdev->pm.sideport_bandwidth; 347 max_bandwidth = rdev->pm.sideport_bandwidth;
348 read_delay_latency.full = dfixed_const(370 * 800 * 1000); 348 read_delay_latency.full = dfixed_const(370 * 800);
349 read_delay_latency.full = dfixed_div(read_delay_latency, 349 a.full = dfixed_const(1000);
350 rdev->pm.igp_sideport_mclk); 350 b.full = dfixed_div(rdev->pm.igp_sideport_mclk, a);
351 read_delay_latency.full = dfixed_div(read_delay_latency, b);
352 read_delay_latency.full = dfixed_mul(read_delay_latency, a);
351 } else { 353 } else {
352 if (max_bandwidth.full > rdev->pm.k8_bandwidth.full && 354 if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
353 rdev->pm.k8_bandwidth.full) 355 rdev->pm.k8_bandwidth.full)
@@ -488,14 +490,10 @@ static void rs690_compute_mode_priority(struct radeon_device *rdev,
488 } 490 }
489 if (wm0->priority_mark.full > priority_mark02.full) 491 if (wm0->priority_mark.full > priority_mark02.full)
490 priority_mark02.full = wm0->priority_mark.full; 492 priority_mark02.full = wm0->priority_mark.full;
491 if (dfixed_trunc(priority_mark02) < 0)
492 priority_mark02.full = 0;
493 if (wm0->priority_mark_max.full > priority_mark02.full) 493 if (wm0->priority_mark_max.full > priority_mark02.full)
494 priority_mark02.full = wm0->priority_mark_max.full; 494 priority_mark02.full = wm0->priority_mark_max.full;
495 if (wm1->priority_mark.full > priority_mark12.full) 495 if (wm1->priority_mark.full > priority_mark12.full)
496 priority_mark12.full = wm1->priority_mark.full; 496 priority_mark12.full = wm1->priority_mark.full;
497 if (dfixed_trunc(priority_mark12) < 0)
498 priority_mark12.full = 0;
499 if (wm1->priority_mark_max.full > priority_mark12.full) 497 if (wm1->priority_mark_max.full > priority_mark12.full)
500 priority_mark12.full = wm1->priority_mark_max.full; 498 priority_mark12.full = wm1->priority_mark_max.full;
501 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 499 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -526,8 +524,6 @@ static void rs690_compute_mode_priority(struct radeon_device *rdev,
526 } 524 }
527 if (wm0->priority_mark.full > priority_mark02.full) 525 if (wm0->priority_mark.full > priority_mark02.full)
528 priority_mark02.full = wm0->priority_mark.full; 526 priority_mark02.full = wm0->priority_mark.full;
529 if (dfixed_trunc(priority_mark02) < 0)
530 priority_mark02.full = 0;
531 if (wm0->priority_mark_max.full > priority_mark02.full) 527 if (wm0->priority_mark_max.full > priority_mark02.full)
532 priority_mark02.full = wm0->priority_mark_max.full; 528 priority_mark02.full = wm0->priority_mark_max.full;
533 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 529 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -555,8 +551,6 @@ static void rs690_compute_mode_priority(struct radeon_device *rdev,
555 } 551 }
556 if (wm1->priority_mark.full > priority_mark12.full) 552 if (wm1->priority_mark.full > priority_mark12.full)
557 priority_mark12.full = wm1->priority_mark.full; 553 priority_mark12.full = wm1->priority_mark.full;
558 if (dfixed_trunc(priority_mark12) < 0)
559 priority_mark12.full = 0;
560 if (wm1->priority_mark_max.full > priority_mark12.full) 554 if (wm1->priority_mark_max.full > priority_mark12.full)
561 priority_mark12.full = wm1->priority_mark_max.full; 555 priority_mark12.full = wm1->priority_mark_max.full;
562 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); 556 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 873eb4b193b4..5d1c316115ef 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -1155,14 +1155,10 @@ static void rv515_compute_mode_priority(struct radeon_device *rdev,
1155 } 1155 }
1156 if (wm0->priority_mark.full > priority_mark02.full) 1156 if (wm0->priority_mark.full > priority_mark02.full)
1157 priority_mark02.full = wm0->priority_mark.full; 1157 priority_mark02.full = wm0->priority_mark.full;
1158 if (dfixed_trunc(priority_mark02) < 0)
1159 priority_mark02.full = 0;
1160 if (wm0->priority_mark_max.full > priority_mark02.full) 1158 if (wm0->priority_mark_max.full > priority_mark02.full)
1161 priority_mark02.full = wm0->priority_mark_max.full; 1159 priority_mark02.full = wm0->priority_mark_max.full;
1162 if (wm1->priority_mark.full > priority_mark12.full) 1160 if (wm1->priority_mark.full > priority_mark12.full)
1163 priority_mark12.full = wm1->priority_mark.full; 1161 priority_mark12.full = wm1->priority_mark.full;
1164 if (dfixed_trunc(priority_mark12) < 0)
1165 priority_mark12.full = 0;
1166 if (wm1->priority_mark_max.full > priority_mark12.full) 1162 if (wm1->priority_mark_max.full > priority_mark12.full)
1167 priority_mark12.full = wm1->priority_mark_max.full; 1163 priority_mark12.full = wm1->priority_mark_max.full;
1168 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 1164 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -1193,8 +1189,6 @@ static void rv515_compute_mode_priority(struct radeon_device *rdev,
1193 } 1189 }
1194 if (wm0->priority_mark.full > priority_mark02.full) 1190 if (wm0->priority_mark.full > priority_mark02.full)
1195 priority_mark02.full = wm0->priority_mark.full; 1191 priority_mark02.full = wm0->priority_mark.full;
1196 if (dfixed_trunc(priority_mark02) < 0)
1197 priority_mark02.full = 0;
1198 if (wm0->priority_mark_max.full > priority_mark02.full) 1192 if (wm0->priority_mark_max.full > priority_mark02.full)
1199 priority_mark02.full = wm0->priority_mark_max.full; 1193 priority_mark02.full = wm0->priority_mark_max.full;
1200 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 1194 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
@@ -1222,8 +1216,6 @@ static void rv515_compute_mode_priority(struct radeon_device *rdev,
1222 } 1216 }
1223 if (wm1->priority_mark.full > priority_mark12.full) 1217 if (wm1->priority_mark.full > priority_mark12.full)
1224 priority_mark12.full = wm1->priority_mark.full; 1218 priority_mark12.full = wm1->priority_mark.full;
1225 if (dfixed_trunc(priority_mark12) < 0)
1226 priority_mark12.full = 0;
1227 if (wm1->priority_mark_max.full > priority_mark12.full) 1219 if (wm1->priority_mark_max.full > priority_mark12.full)
1228 priority_mark12.full = wm1->priority_mark_max.full; 1220 priority_mark12.full = wm1->priority_mark_max.full;
1229 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); 1221 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 5811d277a36a..26633a025252 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -407,9 +407,9 @@ static void rv6xx_enable_engine_feedback_and_reference_sync(struct radeon_device
407 WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC); 407 WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
408} 408}
409 409
410static u64 rv6xx_clocks_per_unit(u32 unit) 410static u32 rv6xx_clocks_per_unit(u32 unit)
411{ 411{
412 u64 tmp = 1 << (2 * unit); 412 u32 tmp = 1 << (2 * unit);
413 413
414 return tmp; 414 return tmp;
415} 415}
@@ -417,7 +417,7 @@ static u64 rv6xx_clocks_per_unit(u32 unit)
417static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev, 417static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
418 u32 unscaled_count, u32 unit) 418 u32 unscaled_count, u32 unit)
419{ 419{
420 u32 count_per_unit = (u32)rv6xx_clocks_per_unit(unit); 420 u32 count_per_unit = rv6xx_clocks_per_unit(unit);
421 421
422 return (unscaled_count + count_per_unit - 1) / count_per_unit; 422 return (unscaled_count + count_per_unit - 1) / count_per_unit;
423} 423}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d96f7cbca0a1..6a64ccaa0695 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -78,11 +78,6 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
78extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev); 78extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
79extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); 79extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
80extern bool evergreen_is_display_hung(struct radeon_device *rdev); 80extern bool evergreen_is_display_hung(struct radeon_device *rdev);
81extern void si_dma_vm_set_page(struct radeon_device *rdev,
82 struct radeon_ib *ib,
83 uint64_t pe,
84 uint64_t addr, unsigned count,
85 uint32_t incr, uint32_t flags);
86static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, 81static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
87 bool enable); 82 bool enable);
88static void si_fini_pg(struct radeon_device *rdev); 83static void si_fini_pg(struct radeon_device *rdev);
@@ -4673,61 +4668,6 @@ static void si_vm_decode_fault(struct radeon_device *rdev,
4673 block, mc_id); 4668 block, mc_id);
4674} 4669}
4675 4670
4676/**
4677 * si_vm_set_page - update the page tables using the CP
4678 *
4679 * @rdev: radeon_device pointer
4680 * @ib: indirect buffer to fill with commands
4681 * @pe: addr of the page entry
4682 * @addr: dst addr to write into pe
4683 * @count: number of page entries to update
4684 * @incr: increase next addr by incr bytes
4685 * @flags: access flags
4686 *
4687 * Update the page tables using the CP (SI).
4688 */
4689void si_vm_set_page(struct radeon_device *rdev,
4690 struct radeon_ib *ib,
4691 uint64_t pe,
4692 uint64_t addr, unsigned count,
4693 uint32_t incr, uint32_t flags)
4694{
4695 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
4696 uint64_t value;
4697 unsigned ndw;
4698
4699 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
4700 while (count) {
4701 ndw = 2 + count * 2;
4702 if (ndw > 0x3FFE)
4703 ndw = 0x3FFE;
4704
4705 ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
4706 ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
4707 WRITE_DATA_DST_SEL(1));
4708 ib->ptr[ib->length_dw++] = pe;
4709 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
4710 for (; ndw > 2; ndw -= 2, --count, pe += 8) {
4711 if (flags & RADEON_VM_PAGE_SYSTEM) {
4712 value = radeon_vm_map_gart(rdev, addr);
4713 value &= 0xFFFFFFFFFFFFF000ULL;
4714 } else if (flags & RADEON_VM_PAGE_VALID) {
4715 value = addr;
4716 } else {
4717 value = 0;
4718 }
4719 addr += incr;
4720 value |= r600_flags;
4721 ib->ptr[ib->length_dw++] = value;
4722 ib->ptr[ib->length_dw++] = upper_32_bits(value);
4723 }
4724 }
4725 } else {
4726 /* DMA */
4727 si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
4728 }
4729}
4730
4731void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 4671void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4732{ 4672{
4733 struct radeon_ring *ring = &rdev->ring[ridx]; 4673 struct radeon_ring *ring = &rdev->ring[ridx];
@@ -5372,52 +5312,53 @@ void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
5372 if (buffer == NULL) 5312 if (buffer == NULL)
5373 return; 5313 return;
5374 5314
5375 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0); 5315 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
5376 buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE; 5316 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
5377 5317
5378 buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1); 5318 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5379 buffer[count++] = 0x80000000; 5319 buffer[count++] = cpu_to_le32(0x80000000);
5380 buffer[count++] = 0x80000000; 5320 buffer[count++] = cpu_to_le32(0x80000000);
5381 5321
5382 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { 5322 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5383 for (ext = sect->section; ext->extent != NULL; ++ext) { 5323 for (ext = sect->section; ext->extent != NULL; ++ext) {
5384 if (sect->id == SECT_CONTEXT) { 5324 if (sect->id == SECT_CONTEXT) {
5385 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count); 5325 buffer[count++] =
5386 buffer[count++] = ext->reg_index - 0xa000; 5326 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
5327 buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
5387 for (i = 0; i < ext->reg_count; i++) 5328 for (i = 0; i < ext->reg_count; i++)
5388 buffer[count++] = ext->extent[i]; 5329 buffer[count++] = cpu_to_le32(ext->extent[i]);
5389 } else { 5330 } else {
5390 return; 5331 return;
5391 } 5332 }
5392 } 5333 }
5393 } 5334 }
5394 5335
5395 buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1); 5336 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
5396 buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START; 5337 buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
5397 switch (rdev->family) { 5338 switch (rdev->family) {
5398 case CHIP_TAHITI: 5339 case CHIP_TAHITI:
5399 case CHIP_PITCAIRN: 5340 case CHIP_PITCAIRN:
5400 buffer[count++] = 0x2a00126a; 5341 buffer[count++] = cpu_to_le32(0x2a00126a);
5401 break; 5342 break;
5402 case CHIP_VERDE: 5343 case CHIP_VERDE:
5403 buffer[count++] = 0x0000124a; 5344 buffer[count++] = cpu_to_le32(0x0000124a);
5404 break; 5345 break;
5405 case CHIP_OLAND: 5346 case CHIP_OLAND:
5406 buffer[count++] = 0x00000082; 5347 buffer[count++] = cpu_to_le32(0x00000082);
5407 break; 5348 break;
5408 case CHIP_HAINAN: 5349 case CHIP_HAINAN:
5409 buffer[count++] = 0x00000000; 5350 buffer[count++] = cpu_to_le32(0x00000000);
5410 break; 5351 break;
5411 default: 5352 default:
5412 buffer[count++] = 0x00000000; 5353 buffer[count++] = cpu_to_le32(0x00000000);
5413 break; 5354 break;
5414 } 5355 }
5415 5356
5416 buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0); 5357 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
5417 buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE; 5358 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
5418 5359
5419 buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0); 5360 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
5420 buffer[count++] = 0; 5361 buffer[count++] = cpu_to_le32(0);
5421} 5362}
5422 5363
5423static void si_init_pg(struct radeon_device *rdev) 5364static void si_init_pg(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 49909d23dfce..8e8f46133532 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -24,6 +24,7 @@
24#include <drm/drmP.h> 24#include <drm/drmP.h>
25#include "radeon.h" 25#include "radeon.h"
26#include "radeon_asic.h" 26#include "radeon_asic.h"
27#include "radeon_trace.h"
27#include "sid.h" 28#include "sid.h"
28 29
29u32 si_gpu_check_soft_reset(struct radeon_device *rdev); 30u32 si_gpu_check_soft_reset(struct radeon_device *rdev);
@@ -75,11 +76,12 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
75 uint64_t addr, unsigned count, 76 uint64_t addr, unsigned count,
76 uint32_t incr, uint32_t flags) 77 uint32_t incr, uint32_t flags)
77{ 78{
78 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
79 uint64_t value; 79 uint64_t value;
80 unsigned ndw; 80 unsigned ndw;
81 81
82 if (flags & RADEON_VM_PAGE_SYSTEM) { 82 trace_radeon_vm_set_page(pe, addr, count, incr, flags);
83
84 if (flags & R600_PTE_SYSTEM) {
83 while (count) { 85 while (count) {
84 ndw = count * 2; 86 ndw = count * 2;
85 if (ndw > 0xFFFFE) 87 if (ndw > 0xFFFFE)
@@ -90,16 +92,10 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
90 ib->ptr[ib->length_dw++] = pe; 92 ib->ptr[ib->length_dw++] = pe;
91 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 93 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
92 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 94 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
93 if (flags & RADEON_VM_PAGE_SYSTEM) { 95 value = radeon_vm_map_gart(rdev, addr);
94 value = radeon_vm_map_gart(rdev, addr); 96 value &= 0xFFFFFFFFFFFFF000ULL;
95 value &= 0xFFFFFFFFFFFFF000ULL;
96 } else if (flags & RADEON_VM_PAGE_VALID) {
97 value = addr;
98 } else {
99 value = 0;
100 }
101 addr += incr; 97 addr += incr;
102 value |= r600_flags; 98 value |= flags;
103 ib->ptr[ib->length_dw++] = value; 99 ib->ptr[ib->length_dw++] = value;
104 ib->ptr[ib->length_dw++] = upper_32_bits(value); 100 ib->ptr[ib->length_dw++] = upper_32_bits(value);
105 } 101 }
@@ -110,7 +106,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
110 if (ndw > 0xFFFFE) 106 if (ndw > 0xFFFFE)
111 ndw = 0xFFFFE; 107 ndw = 0xFFFFE;
112 108
113 if (flags & RADEON_VM_PAGE_VALID) 109 if (flags & R600_PTE_VALID)
114 value = addr; 110 value = addr;
115 else 111 else
116 value = 0; 112 value = 0;
@@ -118,7 +114,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
118 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); 114 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
119 ib->ptr[ib->length_dw++] = pe; /* dst addr */ 115 ib->ptr[ib->length_dw++] = pe; /* dst addr */
120 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 116 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
121 ib->ptr[ib->length_dw++] = r600_flags; /* mask */ 117 ib->ptr[ib->length_dw++] = flags; /* mask */
122 ib->ptr[ib->length_dw++] = 0; 118 ib->ptr[ib->length_dw++] = 0;
123 ib->ptr[ib->length_dw++] = value; /* value */ 119 ib->ptr[ib->length_dw++] = value; /* value */
124 ib->ptr[ib->length_dw++] = upper_32_bits(value); 120 ib->ptr[ib->length_dw++] = upper_32_bits(value);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 2332aa1bf93c..0b00c790fb77 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3589,7 +3589,12 @@ static void si_program_display_gap(struct radeon_device *rdev)
3589 WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp); 3589 WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
3590 } 3590 }
3591 3591
3592 si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0); 3592 /* Setting this to false forces the performance state to low if the crtcs are disabled.
3593 * This can be a problem on PowerXpress systems or if you want to use the card
3594 * for offscreen rendering or compute if there are no crtcs enabled. Set it to
3595 * true for now so that performance scales even if the displays are off.
3596 */
3597 si_notify_smc_display_change(rdev, true /*rdev->pm.dpm.new_active_crtc_count > 0*/);
3593} 3598}
3594 3599
3595static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable) 3600static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
@@ -4553,7 +4558,7 @@ static int si_init_smc_table(struct radeon_device *rdev)
4553 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; 4558 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
4554 4559
4555 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY) 4560 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
4556 table->systemFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH; 4561 table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
4557 4562
4558 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) { 4563 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
4559 table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO; 4564 table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7e2e0ea66a00..b322acc48097 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -478,7 +478,7 @@
478#define STATE3_MASK (0x1f << 15) 478#define STATE3_MASK (0x1f << 15)
479#define STATE3_SHIFT 15 479#define STATE3_SHIFT 15
480 480
481#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x2808 481#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
482#define TRAIN_DONE_D0 (1 << 30) 482#define TRAIN_DONE_D0 (1 << 30)
483#define TRAIN_DONE_D1 (1 << 31) 483#define TRAIN_DONE_D1 (1 << 31)
484 484
@@ -683,6 +683,51 @@
683 * bit5 = 176.4 kHz 683 * bit5 = 176.4 kHz
684 * bit6 = 192 kHz 684 * bit6 = 192 kHz
685 */ 685 */
686
687#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
688# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
689# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
690/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
691 * 0 = invalid
692 * x = legal delay value
693 * 255 = sync not supported
694 */
695#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
696# define HBR_CAPABLE (1 << 0) /* enabled by default */
697
698#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
699# define MANUFACTURER_ID(x) (((x) & 0xffff) << 0)
700# define PRODUCT_ID(x) (((x) & 0xffff) << 16)
701#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
702# define SINK_DESCRIPTION_LEN(x) (((x) & 0xff) << 0)
703#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
704# define PORT_ID0(x) (((x) & 0xffffffff) << 0)
705#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
706# define PORT_ID1(x) (((x) & 0xffffffff) << 0)
707#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
708# define DESCRIPTION0(x) (((x) & 0xff) << 0)
709# define DESCRIPTION1(x) (((x) & 0xff) << 8)
710# define DESCRIPTION2(x) (((x) & 0xff) << 16)
711# define DESCRIPTION3(x) (((x) & 0xff) << 24)
712#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
713# define DESCRIPTION4(x) (((x) & 0xff) << 0)
714# define DESCRIPTION5(x) (((x) & 0xff) << 8)
715# define DESCRIPTION6(x) (((x) & 0xff) << 16)
716# define DESCRIPTION7(x) (((x) & 0xff) << 24)
717#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
718# define DESCRIPTION8(x) (((x) & 0xff) << 0)
719# define DESCRIPTION9(x) (((x) & 0xff) << 8)
720# define DESCRIPTION10(x) (((x) & 0xff) << 16)
721# define DESCRIPTION11(x) (((x) & 0xff) << 24)
722#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
723# define DESCRIPTION12(x) (((x) & 0xff) << 0)
724# define DESCRIPTION13(x) (((x) & 0xff) << 8)
725# define DESCRIPTION14(x) (((x) & 0xff) << 16)
726# define DESCRIPTION15(x) (((x) & 0xff) << 24)
727#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
728# define DESCRIPTION16(x) (((x) & 0xff) << 0)
729# define DESCRIPTION17(x) (((x) & 0xff) << 8)
730
686#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54 731#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54
687# define AUDIO_ENABLED (1 << 31) 732# define AUDIO_ENABLED (1 << 31)
688 733
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index d1372862d871..2ee44ca9d67f 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -1,6 +1,7 @@
1config DRM_SHMOBILE 1config DRM_SHMOBILE
2 tristate "DRM Support for SH Mobile" 2 tristate "DRM Support for SH Mobile"
3 depends on DRM && (ARM || SUPERH) 3 depends on DRM && (ARM || SUPERH)
4 select BACKLIGHT_CLASS_DEVICE
4 select DRM_KMS_HELPER 5 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER 6 select DRM_KMS_FB_HELPER
6 select DRM_KMS_CMA_HELPER 7 select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 54bad98e9477..562f9a401cf6 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -40,7 +40,7 @@
40static void shmob_drm_clk_on(struct shmob_drm_device *sdev) 40static void shmob_drm_clk_on(struct shmob_drm_device *sdev)
41{ 41{
42 if (sdev->clock) 42 if (sdev->clock)
43 clk_enable(sdev->clock); 43 clk_prepare_enable(sdev->clock);
44#if 0 44#if 0
45 if (sdev->meram_dev && sdev->meram_dev->pdev) 45 if (sdev->meram_dev && sdev->meram_dev->pdev)
46 pm_runtime_get_sync(&sdev->meram_dev->pdev->dev); 46 pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
@@ -54,7 +54,7 @@ static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
54 pm_runtime_put_sync(&sdev->meram_dev->pdev->dev); 54 pm_runtime_put_sync(&sdev->meram_dev->pdev->dev);
55#endif 55#endif
56 if (sdev->clock) 56 if (sdev->clock)
57 clk_disable(sdev->clock); 57 clk_disable_unprepare(sdev->clock);
58} 58}
59 59
60/* ----------------------------------------------------------------------------- 60/* -----------------------------------------------------------------------------
diff --git a/drivers/gpu/host1x/drm/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 0f36ddd74e87..8961ba6a34b8 100644
--- a/drivers/gpu/host1x/drm/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -1,6 +1,8 @@
1config DRM_TEGRA 1config DRM_TEGRA
2 bool "NVIDIA Tegra DRM" 2 bool "NVIDIA Tegra DRM"
3 depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
3 depends on DRM 4 depends on DRM
5 select TEGRA_HOST1X
4 select DRM_KMS_HELPER 6 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER 7 select DRM_KMS_FB_HELPER
6 select FB_SYS_FILLRECT 8 select FB_SYS_FILLRECT
@@ -14,6 +16,11 @@ config DRM_TEGRA
14 16
15if DRM_TEGRA 17if DRM_TEGRA
16 18
19config DRM_TEGRA_DEBUG
20 bool "NVIDIA Tegra DRM debug support"
21 help
22 Say yes here to enable debugging support.
23
17config DRM_TEGRA_STAGING 24config DRM_TEGRA_STAGING
18 bool "Enable HOST1X interface" 25 bool "Enable HOST1X interface"
19 depends on STAGING 26 depends on STAGING
@@ -22,9 +29,4 @@ config DRM_TEGRA_STAGING
22 29
23 If unsure, choose N. 30 If unsure, choose N.
24 31
25config DRM_TEGRA_DEBUG
26 bool "NVIDIA Tegra DRM debug support"
27 help
28 Say yes here to enable debugging support.
29
30endif 32endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644
index 000000000000..edc76abd58bb
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -0,0 +1,15 @@
1ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
2
3tegra-drm-y := \
4 bus.o \
5 drm.o \
6 gem.o \
7 fb.o \
8 dc.o \
9 output.o \
10 rgb.o \
11 hdmi.o \
12 gr2d.o \
13 gr3d.o
14
15obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c
new file mode 100644
index 000000000000..565f8f7b9a47
--- /dev/null
+++ b/drivers/gpu/drm/tegra/bus.c
@@ -0,0 +1,76 @@
1/*
2 * Copyright (C) 2013 NVIDIA Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include "drm.h"
10
11static int drm_host1x_set_busid(struct drm_device *dev,
12 struct drm_master *master)
13{
14 const char *device = dev_name(dev->dev);
15 const char *driver = dev->driver->name;
16 const char *bus = dev->dev->bus->name;
17 int length;
18
19 master->unique_len = strlen(bus) + 1 + strlen(device);
20 master->unique_size = master->unique_len;
21
22 master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
23 if (!master->unique)
24 return -ENOMEM;
25
26 snprintf(master->unique, master->unique_len + 1, "%s:%s", bus, device);
27
28 length = strlen(driver) + 1 + master->unique_len;
29
30 dev->devname = kmalloc(length + 1, GFP_KERNEL);
31 if (!dev->devname)
32 return -ENOMEM;
33
34 snprintf(dev->devname, length + 1, "%s@%s", driver, master->unique);
35
36 return 0;
37}
38
39static struct drm_bus drm_host1x_bus = {
40 .bus_type = DRIVER_BUS_HOST1X,
41 .set_busid = drm_host1x_set_busid,
42};
43
44int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device)
45{
46 struct drm_device *drm;
47 int ret;
48
49 INIT_LIST_HEAD(&driver->device_list);
50 driver->bus = &drm_host1x_bus;
51
52 drm = drm_dev_alloc(driver, &device->dev);
53 if (!drm)
54 return -ENOMEM;
55
56 ret = drm_dev_register(drm, 0);
57 if (ret)
58 goto err_free;
59
60 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
61 driver->major, driver->minor, driver->patchlevel,
62 driver->date, drm->primary->index);
63
64 return 0;
65
66err_free:
67 drm_dev_free(drm);
68 return ret;
69}
70
71void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device)
72{
73 struct tegra_drm *tegra = dev_get_drvdata(&device->dev);
74
75 drm_put_dev(tegra->drm);
76}
diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/drm/tegra/dc.c
index b1a05ad901c3..ae1cb31ead7e 100644
--- a/drivers/gpu/host1x/drm/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -8,13 +8,9 @@
8 */ 8 */
9 9
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/debugfs.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15#include <linux/clk/tegra.h> 11#include <linux/clk/tegra.h>
12#include <linux/debugfs.h>
16 13
17#include "host1x_client.h"
18#include "dc.h" 14#include "dc.h"
19#include "drm.h" 15#include "drm.h"
20#include "gem.h" 16#include "gem.h"
@@ -51,6 +47,8 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
51 window.dst.h = crtc_h; 47 window.dst.h = crtc_h;
52 window.format = tegra_dc_format(fb->pixel_format); 48 window.format = tegra_dc_format(fb->pixel_format);
53 window.bits_per_pixel = fb->bits_per_pixel; 49 window.bits_per_pixel = fb->bits_per_pixel;
50 window.bottom_up = tegra_fb_is_bottom_up(fb);
51 window.tiled = tegra_fb_is_tiled(fb);
54 52
55 for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { 53 for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
56 struct tegra_bo *bo = tegra_fb_get_plane(fb, i); 54 struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
@@ -97,8 +95,11 @@ static int tegra_plane_disable(struct drm_plane *plane)
97 95
98static void tegra_plane_destroy(struct drm_plane *plane) 96static void tegra_plane_destroy(struct drm_plane *plane)
99{ 97{
98 struct tegra_plane *p = to_tegra_plane(plane);
99
100 tegra_plane_disable(plane); 100 tegra_plane_disable(plane);
101 drm_plane_cleanup(plane); 101 drm_plane_cleanup(plane);
102 kfree(p);
102} 103}
103 104
104static const struct drm_plane_funcs tegra_plane_funcs = { 105static const struct drm_plane_funcs tegra_plane_funcs = {
@@ -124,7 +125,7 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
124 for (i = 0; i < 2; i++) { 125 for (i = 0; i < 2; i++) {
125 struct tegra_plane *plane; 126 struct tegra_plane *plane;
126 127
127 plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL); 128 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
128 if (!plane) 129 if (!plane)
129 return -ENOMEM; 130 return -ENOMEM;
130 131
@@ -133,8 +134,10 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
133 err = drm_plane_init(drm, &plane->base, 1 << dc->pipe, 134 err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
134 &tegra_plane_funcs, plane_formats, 135 &tegra_plane_funcs, plane_formats,
135 ARRAY_SIZE(plane_formats), false); 136 ARRAY_SIZE(plane_formats), false);
136 if (err < 0) 137 if (err < 0) {
138 kfree(plane);
137 return err; 139 return err;
140 }
138 } 141 }
139 142
140 return 0; 143 return 0;
@@ -145,6 +148,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
145{ 148{
146 unsigned int format = tegra_dc_format(fb->pixel_format); 149 unsigned int format = tegra_dc_format(fb->pixel_format);
147 struct tegra_bo *bo = tegra_fb_get_plane(fb, 0); 150 struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
151 unsigned int h_offset = 0, v_offset = 0;
148 unsigned long value; 152 unsigned long value;
149 153
150 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER); 154 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -156,6 +160,32 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
156 tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE); 160 tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
157 tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH); 161 tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
158 162
163 if (tegra_fb_is_tiled(fb)) {
164 value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
165 DC_WIN_BUFFER_ADDR_MODE_TILE;
166 } else {
167 value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
168 DC_WIN_BUFFER_ADDR_MODE_LINEAR;
169 }
170
171 tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
172
173 /* make sure bottom-up buffers are properly displayed */
174 if (tegra_fb_is_bottom_up(fb)) {
175 value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
176 value |= INVERT_V;
177 tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
178
179 v_offset += fb->height - 1;
180 } else {
181 value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
182 value &= ~INVERT_V;
183 tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
184 }
185
186 tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
187 tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
188
159 value = GENERAL_UPDATE | WIN_A_UPDATE; 189 value = GENERAL_UPDATE | WIN_A_UPDATE;
160 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); 190 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
161 191
@@ -255,14 +285,26 @@ static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
255 return 0; 285 return 0;
256} 286}
257 287
288static void drm_crtc_clear(struct drm_crtc *crtc)
289{
290 memset(crtc, 0, sizeof(*crtc));
291}
292
293static void tegra_dc_destroy(struct drm_crtc *crtc)
294{
295 drm_crtc_cleanup(crtc);
296 drm_crtc_clear(crtc);
297}
298
258static const struct drm_crtc_funcs tegra_crtc_funcs = { 299static const struct drm_crtc_funcs tegra_crtc_funcs = {
259 .page_flip = tegra_dc_page_flip, 300 .page_flip = tegra_dc_page_flip,
260 .set_config = drm_crtc_helper_set_config, 301 .set_config = drm_crtc_helper_set_config,
261 .destroy = drm_crtc_cleanup, 302 .destroy = tegra_dc_destroy,
262}; 303};
263 304
264static void tegra_crtc_disable(struct drm_crtc *crtc) 305static void tegra_crtc_disable(struct drm_crtc *crtc)
265{ 306{
307 struct tegra_dc *dc = to_tegra_dc(crtc);
266 struct drm_device *drm = crtc->dev; 308 struct drm_device *drm = crtc->dev;
267 struct drm_plane *plane; 309 struct drm_plane *plane;
268 310
@@ -277,6 +319,8 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
277 } 319 }
278 } 320 }
279 } 321 }
322
323 drm_vblank_off(drm, dc->pipe);
280} 324}
281 325
282static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc, 326static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -491,9 +535,22 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
491 tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE); 535 tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
492 } 536 }
493 537
538 if (window->bottom_up)
539 v_offset += window->src.h - 1;
540
494 tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET); 541 tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
495 tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET); 542 tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
496 543
544 if (window->tiled) {
545 value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
546 DC_WIN_BUFFER_ADDR_MODE_TILE;
547 } else {
548 value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
549 DC_WIN_BUFFER_ADDR_MODE_LINEAR;
550 }
551
552 tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
553
497 value = WIN_ENABLE; 554 value = WIN_ENABLE;
498 555
499 if (yuv) { 556 if (yuv) {
@@ -512,6 +569,9 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
512 value |= COLOR_EXPAND; 569 value |= COLOR_EXPAND;
513 } 570 }
514 571
572 if (window->bottom_up)
573 value |= INVERT_V;
574
515 tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); 575 tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
516 576
517 /* 577 /*
@@ -1041,30 +1101,30 @@ static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
1041 return 0; 1101 return 0;
1042} 1102}
1043 1103
1044static int tegra_dc_drm_init(struct host1x_client *client, 1104static int tegra_dc_init(struct host1x_client *client)
1045 struct drm_device *drm)
1046{ 1105{
1106 struct tegra_drm *tegra = dev_get_drvdata(client->parent);
1047 struct tegra_dc *dc = host1x_client_to_dc(client); 1107 struct tegra_dc *dc = host1x_client_to_dc(client);
1048 int err; 1108 int err;
1049 1109
1050 dc->pipe = drm->mode_config.num_crtc; 1110 dc->pipe = tegra->drm->mode_config.num_crtc;
1051 1111
1052 drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs); 1112 drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs);
1053 drm_mode_crtc_set_gamma_size(&dc->base, 256); 1113 drm_mode_crtc_set_gamma_size(&dc->base, 256);
1054 drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs); 1114 drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
1055 1115
1056 err = tegra_dc_rgb_init(drm, dc); 1116 err = tegra_dc_rgb_init(tegra->drm, dc);
1057 if (err < 0 && err != -ENODEV) { 1117 if (err < 0 && err != -ENODEV) {
1058 dev_err(dc->dev, "failed to initialize RGB output: %d\n", err); 1118 dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
1059 return err; 1119 return err;
1060 } 1120 }
1061 1121
1062 err = tegra_dc_add_planes(drm, dc); 1122 err = tegra_dc_add_planes(tegra->drm, dc);
1063 if (err < 0) 1123 if (err < 0)
1064 return err; 1124 return err;
1065 1125
1066 if (IS_ENABLED(CONFIG_DEBUG_FS)) { 1126 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1067 err = tegra_dc_debugfs_init(dc, drm->primary); 1127 err = tegra_dc_debugfs_init(dc, tegra->drm->primary);
1068 if (err < 0) 1128 if (err < 0)
1069 dev_err(dc->dev, "debugfs setup failed: %d\n", err); 1129 dev_err(dc->dev, "debugfs setup failed: %d\n", err);
1070 } 1130 }
@@ -1080,7 +1140,7 @@ static int tegra_dc_drm_init(struct host1x_client *client,
1080 return 0; 1140 return 0;
1081} 1141}
1082 1142
1083static int tegra_dc_drm_exit(struct host1x_client *client) 1143static int tegra_dc_exit(struct host1x_client *client)
1084{ 1144{
1085 struct tegra_dc *dc = host1x_client_to_dc(client); 1145 struct tegra_dc *dc = host1x_client_to_dc(client);
1086 int err; 1146 int err;
@@ -1103,13 +1163,12 @@ static int tegra_dc_drm_exit(struct host1x_client *client)
1103} 1163}
1104 1164
1105static const struct host1x_client_ops dc_client_ops = { 1165static const struct host1x_client_ops dc_client_ops = {
1106 .drm_init = tegra_dc_drm_init, 1166 .init = tegra_dc_init,
1107 .drm_exit = tegra_dc_drm_exit, 1167 .exit = tegra_dc_exit,
1108}; 1168};
1109 1169
1110static int tegra_dc_probe(struct platform_device *pdev) 1170static int tegra_dc_probe(struct platform_device *pdev)
1111{ 1171{
1112 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
1113 struct resource *regs; 1172 struct resource *regs;
1114 struct tegra_dc *dc; 1173 struct tegra_dc *dc;
1115 int err; 1174 int err;
@@ -1153,7 +1212,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
1153 return err; 1212 return err;
1154 } 1213 }
1155 1214
1156 err = host1x_register_client(host1x, &dc->client); 1215 err = host1x_client_register(&dc->client);
1157 if (err < 0) { 1216 if (err < 0) {
1158 dev_err(&pdev->dev, "failed to register host1x client: %d\n", 1217 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1159 err); 1218 err);
@@ -1167,17 +1226,22 @@ static int tegra_dc_probe(struct platform_device *pdev)
1167 1226
1168static int tegra_dc_remove(struct platform_device *pdev) 1227static int tegra_dc_remove(struct platform_device *pdev)
1169{ 1228{
1170 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
1171 struct tegra_dc *dc = platform_get_drvdata(pdev); 1229 struct tegra_dc *dc = platform_get_drvdata(pdev);
1172 int err; 1230 int err;
1173 1231
1174 err = host1x_unregister_client(host1x, &dc->client); 1232 err = host1x_client_unregister(&dc->client);
1175 if (err < 0) { 1233 if (err < 0) {
1176 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", 1234 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
1177 err); 1235 err);
1178 return err; 1236 return err;
1179 } 1237 }
1180 1238
1239 err = tegra_dc_rgb_remove(dc);
1240 if (err < 0) {
1241 dev_err(&pdev->dev, "failed to remove RGB output: %d\n", err);
1242 return err;
1243 }
1244
1181 clk_disable_unprepare(dc->clk); 1245 clk_disable_unprepare(dc->clk);
1182 1246
1183 return 0; 1247 return 0;
diff --git a/drivers/gpu/host1x/drm/dc.h b/drivers/gpu/drm/tegra/dc.h
index 79eaec9aac77..91bbda291470 100644
--- a/drivers/gpu/host1x/drm/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -302,6 +302,7 @@
302#define DC_WIN_CSC_KVB 0x618 302#define DC_WIN_CSC_KVB 0x618
303 303
304#define DC_WIN_WIN_OPTIONS 0x700 304#define DC_WIN_WIN_OPTIONS 0x700
305#define INVERT_V (1 << 2)
305#define COLOR_EXPAND (1 << 6) 306#define COLOR_EXPAND (1 << 6)
306#define CSC_ENABLE (1 << 18) 307#define CSC_ENABLE (1 << 18)
307#define WIN_ENABLE (1 << 30) 308#define WIN_ENABLE (1 << 30)
@@ -365,6 +366,10 @@
365#define DC_WIN_BUF_STRIDE 0x70b 366#define DC_WIN_BUF_STRIDE 0x70b
366#define DC_WIN_UV_BUF_STRIDE 0x70c 367#define DC_WIN_UV_BUF_STRIDE 0x70c
367#define DC_WIN_BUFFER_ADDR_MODE 0x70d 368#define DC_WIN_BUFFER_ADDR_MODE 0x70d
369#define DC_WIN_BUFFER_ADDR_MODE_LINEAR (0 << 0)
370#define DC_WIN_BUFFER_ADDR_MODE_TILE (1 << 0)
371#define DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV (0 << 16)
372#define DC_WIN_BUFFER_ADDR_MODE_TILE_UV (1 << 16)
368#define DC_WIN_DV_CONTROL 0x70e 373#define DC_WIN_DV_CONTROL 0x70e
369 374
370#define DC_WIN_BLEND_NOKEY 0x70f 375#define DC_WIN_BLEND_NOKEY 0x70f
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/drm/tegra/drm.c
index df7d90a3a4fa..28e178137718 100644
--- a/drivers/gpu/host1x/drm/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -7,21 +7,10 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#include <linux/module.h> 10#include <linux/host1x.h>
11#include <linux/of_address.h>
12#include <linux/of_platform.h>
13 11
14#include <linux/dma-mapping.h>
15#include <asm/dma-iommu.h>
16
17#include <drm/drm.h>
18#include <drm/drmP.h>
19
20#include "host1x_client.h"
21#include "dev.h"
22#include "drm.h" 12#include "drm.h"
23#include "gem.h" 13#include "gem.h"
24#include "syncpt.h"
25 14
26#define DRIVER_NAME "tegra" 15#define DRIVER_NAME "tegra"
27#define DRIVER_DESC "NVIDIA Tegra graphics" 16#define DRIVER_DESC "NVIDIA Tegra graphics"
@@ -30,297 +19,235 @@
30#define DRIVER_MINOR 0 19#define DRIVER_MINOR 0
31#define DRIVER_PATCHLEVEL 0 20#define DRIVER_PATCHLEVEL 0
32 21
33struct host1x_drm_client { 22struct tegra_drm_file {
34 struct host1x_client *client; 23 struct list_head contexts;
35 struct device_node *np;
36 struct list_head list;
37}; 24};
38 25
39static int host1x_add_drm_client(struct host1x_drm *host1x, 26static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
40 struct device_node *np)
41{ 27{
42 struct host1x_drm_client *client; 28 struct host1x_device *device = to_host1x_device(drm->dev);
29 struct tegra_drm *tegra;
30 int err;
43 31
44 client = kzalloc(sizeof(*client), GFP_KERNEL); 32 tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
45 if (!client) 33 if (!tegra)
46 return -ENOMEM; 34 return -ENOMEM;
47 35
48 INIT_LIST_HEAD(&client->list); 36 dev_set_drvdata(drm->dev, tegra);
49 client->np = of_node_get(np); 37 mutex_init(&tegra->clients_lock);
38 INIT_LIST_HEAD(&tegra->clients);
39 drm->dev_private = tegra;
40 tegra->drm = drm;
50 41
51 list_add_tail(&client->list, &host1x->drm_clients); 42 drm_mode_config_init(drm);
52 43
53 return 0; 44 err = host1x_device_init(device);
54} 45 if (err < 0)
46 return err;
55 47
56static int host1x_activate_drm_client(struct host1x_drm *host1x, 48 /*
57 struct host1x_drm_client *drm, 49 * We don't use the drm_irq_install() helpers provided by the DRM
58 struct host1x_client *client) 50 * core, so we need to set this manually in order to allow the
59{ 51 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
60 mutex_lock(&host1x->drm_clients_lock); 52 */
61 list_del_init(&drm->list); 53 drm->irq_enabled = true;
62 list_add_tail(&drm->list, &host1x->drm_active);
63 drm->client = client;
64 mutex_unlock(&host1x->drm_clients_lock);
65 54
66 return 0; 55 err = drm_vblank_init(drm, drm->mode_config.num_crtc);
67} 56 if (err < 0)
57 return err;
68 58
69static int host1x_remove_drm_client(struct host1x_drm *host1x, 59 err = tegra_drm_fb_init(drm);
70 struct host1x_drm_client *client) 60 if (err < 0)
71{ 61 return err;
72 mutex_lock(&host1x->drm_clients_lock);
73 list_del_init(&client->list);
74 mutex_unlock(&host1x->drm_clients_lock);
75 62
76 of_node_put(client->np); 63 drm_kms_helper_poll_init(drm);
77 kfree(client);
78 64
79 return 0; 65 return 0;
80} 66}
81 67
82static int host1x_parse_dt(struct host1x_drm *host1x) 68static int tegra_drm_unload(struct drm_device *drm)
83{ 69{
84 static const char * const compat[] = { 70 struct host1x_device *device = to_host1x_device(drm->dev);
85 "nvidia,tegra20-dc",
86 "nvidia,tegra20-hdmi",
87 "nvidia,tegra20-gr2d",
88 "nvidia,tegra30-dc",
89 "nvidia,tegra30-hdmi",
90 "nvidia,tegra30-gr2d",
91 };
92 unsigned int i;
93 int err; 71 int err;
94 72
95 for (i = 0; i < ARRAY_SIZE(compat); i++) { 73 drm_kms_helper_poll_fini(drm);
96 struct device_node *np; 74 tegra_drm_fb_exit(drm);
75 drm_vblank_cleanup(drm);
76 drm_mode_config_cleanup(drm);
97 77
98 for_each_child_of_node(host1x->dev->of_node, np) { 78 err = host1x_device_exit(device);
99 if (of_device_is_compatible(np, compat[i]) && 79 if (err < 0)
100 of_device_is_available(np)) { 80 return err;
101 err = host1x_add_drm_client(host1x, np);
102 if (err < 0)
103 return err;
104 }
105 }
106 }
107 81
108 return 0; 82 return 0;
109} 83}
110 84
111int host1x_drm_alloc(struct platform_device *pdev) 85static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
112{ 86{
113 struct host1x_drm *host1x; 87 struct tegra_drm_file *fpriv;
114 int err;
115 88
116 host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL); 89 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
117 if (!host1x) 90 if (!fpriv)
118 return -ENOMEM; 91 return -ENOMEM;
119 92
120 mutex_init(&host1x->drm_clients_lock); 93 INIT_LIST_HEAD(&fpriv->contexts);
121 INIT_LIST_HEAD(&host1x->drm_clients); 94 filp->driver_priv = fpriv;
122 INIT_LIST_HEAD(&host1x->drm_active);
123 mutex_init(&host1x->clients_lock);
124 INIT_LIST_HEAD(&host1x->clients);
125 host1x->dev = &pdev->dev;
126
127 err = host1x_parse_dt(host1x);
128 if (err < 0) {
129 dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
130 return err;
131 }
132
133 host1x_set_drm_data(&pdev->dev, host1x);
134 95
135 return 0; 96 return 0;
136} 97}
137 98
138int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm) 99static void tegra_drm_context_free(struct tegra_drm_context *context)
139{ 100{
140 struct host1x_client *client; 101 context->client->ops->close_channel(context);
141 102 kfree(context);
142 mutex_lock(&host1x->clients_lock);
143
144 list_for_each_entry(client, &host1x->clients, list) {
145 if (client->ops && client->ops->drm_init) {
146 int err = client->ops->drm_init(client, drm);
147 if (err < 0) {
148 dev_err(host1x->dev,
149 "DRM setup failed for %s: %d\n",
150 dev_name(client->dev), err);
151 mutex_unlock(&host1x->clients_lock);
152 return err;
153 }
154 }
155 }
156
157 mutex_unlock(&host1x->clients_lock);
158
159 return 0;
160} 103}
161 104
162int host1x_drm_exit(struct host1x_drm *host1x) 105static void tegra_drm_lastclose(struct drm_device *drm)
163{ 106{
164 struct platform_device *pdev = to_platform_device(host1x->dev); 107 struct tegra_drm *tegra = drm->dev_private;
165 struct host1x_client *client;
166
167 if (!host1x->drm)
168 return 0;
169
170 mutex_lock(&host1x->clients_lock);
171 108
172 list_for_each_entry_reverse(client, &host1x->clients, list) { 109 tegra_fbdev_restore_mode(tegra->fbdev);
173 if (client->ops && client->ops->drm_exit) { 110}
174 int err = client->ops->drm_exit(client);
175 if (err < 0) {
176 dev_err(host1x->dev,
177 "DRM cleanup failed for %s: %d\n",
178 dev_name(client->dev), err);
179 mutex_unlock(&host1x->clients_lock);
180 return err;
181 }
182 }
183 }
184 111
185 mutex_unlock(&host1x->clients_lock); 112static struct host1x_bo *
113host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
114{
115 struct drm_gem_object *gem;
116 struct tegra_bo *bo;
186 117
187 drm_platform_exit(&tegra_drm_driver, pdev); 118 gem = drm_gem_object_lookup(drm, file, handle);
188 host1x->drm = NULL; 119 if (!gem)
120 return NULL;
189 121
190 return 0; 122 mutex_lock(&drm->struct_mutex);
191} 123 drm_gem_object_unreference(gem);
124 mutex_unlock(&drm->struct_mutex);
192 125
193int host1x_register_client(struct host1x_drm *host1x, 126 bo = to_tegra_bo(gem);
194 struct host1x_client *client) 127 return &bo->base;
195{ 128}
196 struct host1x_drm_client *drm, *tmp; 129
130int tegra_drm_submit(struct tegra_drm_context *context,
131 struct drm_tegra_submit *args, struct drm_device *drm,
132 struct drm_file *file)
133{
134 unsigned int num_cmdbufs = args->num_cmdbufs;
135 unsigned int num_relocs = args->num_relocs;
136 unsigned int num_waitchks = args->num_waitchks;
137 struct drm_tegra_cmdbuf __user *cmdbufs =
138 (void * __user)(uintptr_t)args->cmdbufs;
139 struct drm_tegra_reloc __user *relocs =
140 (void * __user)(uintptr_t)args->relocs;
141 struct drm_tegra_waitchk __user *waitchks =
142 (void * __user)(uintptr_t)args->waitchks;
143 struct drm_tegra_syncpt syncpt;
144 struct host1x_job *job;
197 int err; 145 int err;
198 146
199 mutex_lock(&host1x->clients_lock); 147 /* We don't yet support other than one syncpt_incr struct per submit */
200 list_add_tail(&client->list, &host1x->clients); 148 if (args->num_syncpts != 1)
201 mutex_unlock(&host1x->clients_lock); 149 return -EINVAL;
202 150
203 list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list) 151 job = host1x_job_alloc(context->channel, args->num_cmdbufs,
204 if (drm->np == client->dev->of_node) 152 args->num_relocs, args->num_waitchks);
205 host1x_activate_drm_client(host1x, drm, client); 153 if (!job)
154 return -ENOMEM;
206 155
207 if (list_empty(&host1x->drm_clients)) { 156 job->num_relocs = args->num_relocs;
208 struct platform_device *pdev = to_platform_device(host1x->dev); 157 job->num_waitchk = args->num_waitchks;
158 job->client = (u32)args->context;
159 job->class = context->client->base.class;
160 job->serialize = true;
209 161
210 err = drm_platform_init(&tegra_drm_driver, pdev); 162 while (num_cmdbufs) {
211 if (err < 0) { 163 struct drm_tegra_cmdbuf cmdbuf;
212 dev_err(host1x->dev, "drm_platform_init(): %d\n", err); 164 struct host1x_bo *bo;
213 return err;
214 }
215 }
216 165
217 return 0; 166 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
218} 167 if (err)
168 goto fail;
219 169
220int host1x_unregister_client(struct host1x_drm *host1x, 170 bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
221 struct host1x_client *client) 171 if (!bo) {
222{ 172 err = -ENOENT;
223 struct host1x_drm_client *drm, *tmp; 173 goto fail;
224 int err;
225
226 list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
227 if (drm->client == client) {
228 err = host1x_drm_exit(host1x);
229 if (err < 0) {
230 dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
231 err);
232 return err;
233 }
234
235 host1x_remove_drm_client(host1x, drm);
236 break;
237 } 174 }
175
176 host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
177 num_cmdbufs--;
178 cmdbufs++;
238 } 179 }
239 180
240 mutex_lock(&host1x->clients_lock); 181 err = copy_from_user(job->relocarray, relocs,
241 list_del_init(&client->list); 182 sizeof(*relocs) * num_relocs);
242 mutex_unlock(&host1x->clients_lock); 183 if (err)
184 goto fail;
243 185
244 return 0; 186 while (num_relocs--) {
245} 187 struct host1x_reloc *reloc = &job->relocarray[num_relocs];
188 struct host1x_bo *cmdbuf, *target;
246 189
247static int tegra_drm_load(struct drm_device *drm, unsigned long flags) 190 cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
248{ 191 target = host1x_bo_lookup(drm, file, (u32)reloc->target);
249 struct host1x_drm *host1x;
250 int err;
251 192
252 host1x = host1x_get_drm_data(drm->dev); 193 reloc->cmdbuf = cmdbuf;
253 drm->dev_private = host1x; 194 reloc->target = target;
254 host1x->drm = drm;
255 195
256 drm_mode_config_init(drm); 196 if (!reloc->target || !reloc->cmdbuf) {
197 err = -ENOENT;
198 goto fail;
199 }
200 }
257 201
258 err = host1x_drm_init(host1x, drm); 202 err = copy_from_user(job->waitchk, waitchks,
259 if (err < 0) 203 sizeof(*waitchks) * num_waitchks);
260 return err; 204 if (err)
205 goto fail;
261 206
262 /* 207 err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
263 * We don't use the drm_irq_install() helpers provided by the DRM 208 sizeof(syncpt));
264 * core, so we need to set this manually in order to allow the 209 if (err)
265 * DRM_IOCTL_WAIT_VBLANK to operate correctly. 210 goto fail;
266 */
267 drm->irq_enabled = true;
268 211
269 err = drm_vblank_init(drm, drm->mode_config.num_crtc); 212 job->is_addr_reg = context->client->ops->is_addr_reg;
270 if (err < 0) 213 job->syncpt_incrs = syncpt.incrs;
271 return err; 214 job->syncpt_id = syncpt.id;
215 job->timeout = 10000;
272 216
273 err = tegra_drm_fb_init(drm); 217 if (args->timeout && args->timeout < 10000)
274 if (err < 0) 218 job->timeout = args->timeout;
275 return err;
276 219
277 drm_kms_helper_poll_init(drm); 220 err = host1x_job_pin(job, context->client->base.dev);
221 if (err)
222 goto fail;
278 223
279 return 0; 224 err = host1x_job_submit(job);
280} 225 if (err)
226 goto fail_submit;
281 227
282static int tegra_drm_unload(struct drm_device *drm) 228 args->fence = job->syncpt_end;
283{
284 drm_kms_helper_poll_fini(drm);
285 tegra_drm_fb_exit(drm);
286
287 drm_mode_config_cleanup(drm);
288 229
230 host1x_job_put(job);
289 return 0; 231 return 0;
290}
291 232
292static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) 233fail_submit:
293{ 234 host1x_job_unpin(job);
294 struct host1x_drm_file *fpriv; 235fail:
295 236 host1x_job_put(job);
296 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); 237 return err;
297 if (!fpriv)
298 return -ENOMEM;
299
300 INIT_LIST_HEAD(&fpriv->contexts);
301 filp->driver_priv = fpriv;
302
303 return 0;
304} 238}
305 239
306static void host1x_drm_context_free(struct host1x_drm_context *context)
307{
308 context->client->ops->close_channel(context);
309 kfree(context);
310}
311 240
312static void tegra_drm_lastclose(struct drm_device *drm) 241#ifdef CONFIG_DRM_TEGRA_STAGING
242static struct tegra_drm_context *tegra_drm_get_context(__u64 context)
313{ 243{
314 struct host1x_drm *host1x = drm->dev_private; 244 return (struct tegra_drm_context *)(uintptr_t)context;
315
316 tegra_fbdev_restore_mode(host1x->fbdev);
317} 245}
318 246
319#ifdef CONFIG_DRM_TEGRA_STAGING 247static bool tegra_drm_file_owns_context(struct tegra_drm_file *file,
320static bool host1x_drm_file_owns_context(struct host1x_drm_file *file, 248 struct tegra_drm_context *context)
321 struct host1x_drm_context *context)
322{ 249{
323 struct host1x_drm_context *ctx; 250 struct tegra_drm_context *ctx;
324 251
325 list_for_each_entry(ctx, &file->contexts, list) 252 list_for_each_entry(ctx, &file->contexts, list)
326 if (ctx == context) 253 if (ctx == context)
@@ -335,7 +262,7 @@ static int tegra_gem_create(struct drm_device *drm, void *data,
335 struct drm_tegra_gem_create *args = data; 262 struct drm_tegra_gem_create *args = data;
336 struct tegra_bo *bo; 263 struct tegra_bo *bo;
337 264
338 bo = tegra_bo_create_with_handle(file, drm, args->size, 265 bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
339 &args->handle); 266 &args->handle);
340 if (IS_ERR(bo)) 267 if (IS_ERR(bo))
341 return PTR_ERR(bo); 268 return PTR_ERR(bo);
@@ -366,10 +293,11 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data,
366static int tegra_syncpt_read(struct drm_device *drm, void *data, 293static int tegra_syncpt_read(struct drm_device *drm, void *data,
367 struct drm_file *file) 294 struct drm_file *file)
368{ 295{
296 struct host1x *host = dev_get_drvdata(drm->dev->parent);
369 struct drm_tegra_syncpt_read *args = data; 297 struct drm_tegra_syncpt_read *args = data;
370 struct host1x *host = dev_get_drvdata(drm->dev); 298 struct host1x_syncpt *sp;
371 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
372 299
300 sp = host1x_syncpt_get(host, args->id);
373 if (!sp) 301 if (!sp)
374 return -EINVAL; 302 return -EINVAL;
375 303
@@ -380,10 +308,11 @@ static int tegra_syncpt_read(struct drm_device *drm, void *data,
380static int tegra_syncpt_incr(struct drm_device *drm, void *data, 308static int tegra_syncpt_incr(struct drm_device *drm, void *data,
381 struct drm_file *file) 309 struct drm_file *file)
382{ 310{
311 struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
383 struct drm_tegra_syncpt_incr *args = data; 312 struct drm_tegra_syncpt_incr *args = data;
384 struct host1x *host = dev_get_drvdata(drm->dev); 313 struct host1x_syncpt *sp;
385 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
386 314
315 sp = host1x_syncpt_get(host1x, args->id);
387 if (!sp) 316 if (!sp)
388 return -EINVAL; 317 return -EINVAL;
389 318
@@ -393,10 +322,11 @@ static int tegra_syncpt_incr(struct drm_device *drm, void *data,
393static int tegra_syncpt_wait(struct drm_device *drm, void *data, 322static int tegra_syncpt_wait(struct drm_device *drm, void *data,
394 struct drm_file *file) 323 struct drm_file *file)
395{ 324{
325 struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
396 struct drm_tegra_syncpt_wait *args = data; 326 struct drm_tegra_syncpt_wait *args = data;
397 struct host1x *host = dev_get_drvdata(drm->dev); 327 struct host1x_syncpt *sp;
398 struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
399 328
329 sp = host1x_syncpt_get(host1x, args->id);
400 if (!sp) 330 if (!sp)
401 return -EINVAL; 331 return -EINVAL;
402 332
@@ -407,26 +337,26 @@ static int tegra_syncpt_wait(struct drm_device *drm, void *data,
407static int tegra_open_channel(struct drm_device *drm, void *data, 337static int tegra_open_channel(struct drm_device *drm, void *data,
408 struct drm_file *file) 338 struct drm_file *file)
409{ 339{
340 struct tegra_drm_file *fpriv = file->driver_priv;
341 struct tegra_drm *tegra = drm->dev_private;
410 struct drm_tegra_open_channel *args = data; 342 struct drm_tegra_open_channel *args = data;
411 struct host1x_client *client; 343 struct tegra_drm_context *context;
412 struct host1x_drm_context *context; 344 struct tegra_drm_client *client;
413 struct host1x_drm_file *fpriv = file->driver_priv;
414 struct host1x_drm *host1x = drm->dev_private;
415 int err = -ENODEV; 345 int err = -ENODEV;
416 346
417 context = kzalloc(sizeof(*context), GFP_KERNEL); 347 context = kzalloc(sizeof(*context), GFP_KERNEL);
418 if (!context) 348 if (!context)
419 return -ENOMEM; 349 return -ENOMEM;
420 350
421 list_for_each_entry(client, &host1x->clients, list) 351 list_for_each_entry(client, &tegra->clients, list)
422 if (client->class == args->client) { 352 if (client->base.class == args->client) {
423 err = client->ops->open_channel(client, context); 353 err = client->ops->open_channel(client, context);
424 if (err) 354 if (err)
425 break; 355 break;
426 356
427 context->client = client;
428 list_add(&context->list, &fpriv->contexts); 357 list_add(&context->list, &fpriv->contexts);
429 args->context = (uintptr_t)context; 358 args->context = (uintptr_t)context;
359 context->client = client;
430 return 0; 360 return 0;
431 } 361 }
432 362
@@ -437,16 +367,17 @@ static int tegra_open_channel(struct drm_device *drm, void *data,
437static int tegra_close_channel(struct drm_device *drm, void *data, 367static int tegra_close_channel(struct drm_device *drm, void *data,
438 struct drm_file *file) 368 struct drm_file *file)
439{ 369{
370 struct tegra_drm_file *fpriv = file->driver_priv;
440 struct drm_tegra_close_channel *args = data; 371 struct drm_tegra_close_channel *args = data;
441 struct host1x_drm_file *fpriv = file->driver_priv; 372 struct tegra_drm_context *context;
442 struct host1x_drm_context *context = 373
443 (struct host1x_drm_context *)(uintptr_t)args->context; 374 context = tegra_drm_get_context(args->context);
444 375
445 if (!host1x_drm_file_owns_context(fpriv, context)) 376 if (!tegra_drm_file_owns_context(fpriv, context))
446 return -EINVAL; 377 return -EINVAL;
447 378
448 list_del(&context->list); 379 list_del(&context->list);
449 host1x_drm_context_free(context); 380 tegra_drm_context_free(context);
450 381
451 return 0; 382 return 0;
452} 383}
@@ -454,19 +385,20 @@ static int tegra_close_channel(struct drm_device *drm, void *data,
454static int tegra_get_syncpt(struct drm_device *drm, void *data, 385static int tegra_get_syncpt(struct drm_device *drm, void *data,
455 struct drm_file *file) 386 struct drm_file *file)
456{ 387{
388 struct tegra_drm_file *fpriv = file->driver_priv;
457 struct drm_tegra_get_syncpt *args = data; 389 struct drm_tegra_get_syncpt *args = data;
458 struct host1x_drm_file *fpriv = file->driver_priv; 390 struct tegra_drm_context *context;
459 struct host1x_drm_context *context =
460 (struct host1x_drm_context *)(uintptr_t)args->context;
461 struct host1x_syncpt *syncpt; 391 struct host1x_syncpt *syncpt;
462 392
463 if (!host1x_drm_file_owns_context(fpriv, context)) 393 context = tegra_drm_get_context(args->context);
394
395 if (!tegra_drm_file_owns_context(fpriv, context))
464 return -ENODEV; 396 return -ENODEV;
465 397
466 if (args->index >= context->client->num_syncpts) 398 if (args->index >= context->client->base.num_syncpts)
467 return -EINVAL; 399 return -EINVAL;
468 400
469 syncpt = context->client->syncpts[args->index]; 401 syncpt = context->client->base.syncpts[args->index];
470 args->id = host1x_syncpt_id(syncpt); 402 args->id = host1x_syncpt_id(syncpt);
471 403
472 return 0; 404 return 0;
@@ -475,16 +407,45 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,
475static int tegra_submit(struct drm_device *drm, void *data, 407static int tegra_submit(struct drm_device *drm, void *data,
476 struct drm_file *file) 408 struct drm_file *file)
477{ 409{
410 struct tegra_drm_file *fpriv = file->driver_priv;
478 struct drm_tegra_submit *args = data; 411 struct drm_tegra_submit *args = data;
479 struct host1x_drm_file *fpriv = file->driver_priv; 412 struct tegra_drm_context *context;
480 struct host1x_drm_context *context =
481 (struct host1x_drm_context *)(uintptr_t)args->context;
482 413
483 if (!host1x_drm_file_owns_context(fpriv, context)) 414 context = tegra_drm_get_context(args->context);
415
416 if (!tegra_drm_file_owns_context(fpriv, context))
484 return -ENODEV; 417 return -ENODEV;
485 418
486 return context->client->ops->submit(context, args, drm, file); 419 return context->client->ops->submit(context, args, drm, file);
487} 420}
421
422static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
423 struct drm_file *file)
424{
425 struct tegra_drm_file *fpriv = file->driver_priv;
426 struct drm_tegra_get_syncpt_base *args = data;
427 struct tegra_drm_context *context;
428 struct host1x_syncpt_base *base;
429 struct host1x_syncpt *syncpt;
430
431 context = tegra_drm_get_context(args->context);
432
433 if (!tegra_drm_file_owns_context(fpriv, context))
434 return -ENODEV;
435
436 if (args->syncpt >= context->client->base.num_syncpts)
437 return -EINVAL;
438
439 syncpt = context->client->base.syncpts[args->syncpt];
440
441 base = host1x_syncpt_get_base(syncpt);
442 if (!base)
443 return -ENXIO;
444
445 args->id = host1x_syncpt_base_id(base);
446
447 return 0;
448}
488#endif 449#endif
489 450
490static const struct drm_ioctl_desc tegra_drm_ioctls[] = { 451static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
@@ -498,6 +459,7 @@ static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
498 DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED), 459 DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
499 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED), 460 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
500 DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED), 461 DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
462 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED),
501#endif 463#endif
502}; 464};
503 465
@@ -559,15 +521,15 @@ static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
559 521
560static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file) 522static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
561{ 523{
562 struct host1x_drm_file *fpriv = file->driver_priv; 524 struct tegra_drm_file *fpriv = file->driver_priv;
563 struct host1x_drm_context *context, *tmp; 525 struct tegra_drm_context *context, *tmp;
564 struct drm_crtc *crtc; 526 struct drm_crtc *crtc;
565 527
566 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) 528 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
567 tegra_dc_cancel_page_flip(crtc, file); 529 tegra_dc_cancel_page_flip(crtc, file);
568 530
569 list_for_each_entry_safe(context, tmp, &fpriv->contexts, list) 531 list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
570 host1x_drm_context_free(context); 532 tegra_drm_context_free(context);
571 533
572 kfree(fpriv); 534 kfree(fpriv);
573} 535}
@@ -645,3 +607,108 @@ struct drm_driver tegra_drm_driver = {
645 .minor = DRIVER_MINOR, 607 .minor = DRIVER_MINOR,
646 .patchlevel = DRIVER_PATCHLEVEL, 608 .patchlevel = DRIVER_PATCHLEVEL,
647}; 609};
610
611int tegra_drm_register_client(struct tegra_drm *tegra,
612 struct tegra_drm_client *client)
613{
614 mutex_lock(&tegra->clients_lock);
615 list_add_tail(&client->list, &tegra->clients);
616 mutex_unlock(&tegra->clients_lock);
617
618 return 0;
619}
620
621int tegra_drm_unregister_client(struct tegra_drm *tegra,
622 struct tegra_drm_client *client)
623{
624 mutex_lock(&tegra->clients_lock);
625 list_del_init(&client->list);
626 mutex_unlock(&tegra->clients_lock);
627
628 return 0;
629}
630
631static int host1x_drm_probe(struct host1x_device *device)
632{
633 return drm_host1x_init(&tegra_drm_driver, device);
634}
635
636static int host1x_drm_remove(struct host1x_device *device)
637{
638 drm_host1x_exit(&tegra_drm_driver, device);
639
640 return 0;
641}
642
643static const struct of_device_id host1x_drm_subdevs[] = {
644 { .compatible = "nvidia,tegra20-dc", },
645 { .compatible = "nvidia,tegra20-hdmi", },
646 { .compatible = "nvidia,tegra20-gr2d", },
647 { .compatible = "nvidia,tegra20-gr3d", },
648 { .compatible = "nvidia,tegra30-dc", },
649 { .compatible = "nvidia,tegra30-hdmi", },
650 { .compatible = "nvidia,tegra30-gr2d", },
651 { .compatible = "nvidia,tegra30-gr3d", },
652 { .compatible = "nvidia,tegra114-hdmi", },
653 { .compatible = "nvidia,tegra114-gr3d", },
654 { /* sentinel */ }
655};
656
657static struct host1x_driver host1x_drm_driver = {
658 .name = "drm",
659 .probe = host1x_drm_probe,
660 .remove = host1x_drm_remove,
661 .subdevs = host1x_drm_subdevs,
662};
663
664static int __init host1x_drm_init(void)
665{
666 int err;
667
668 err = host1x_driver_register(&host1x_drm_driver);
669 if (err < 0)
670 return err;
671
672 err = platform_driver_register(&tegra_dc_driver);
673 if (err < 0)
674 goto unregister_host1x;
675
676 err = platform_driver_register(&tegra_hdmi_driver);
677 if (err < 0)
678 goto unregister_dc;
679
680 err = platform_driver_register(&tegra_gr2d_driver);
681 if (err < 0)
682 goto unregister_hdmi;
683
684 err = platform_driver_register(&tegra_gr3d_driver);
685 if (err < 0)
686 goto unregister_gr2d;
687
688 return 0;
689
690unregister_gr2d:
691 platform_driver_unregister(&tegra_gr2d_driver);
692unregister_hdmi:
693 platform_driver_unregister(&tegra_hdmi_driver);
694unregister_dc:
695 platform_driver_unregister(&tegra_dc_driver);
696unregister_host1x:
697 host1x_driver_unregister(&host1x_drm_driver);
698 return err;
699}
700module_init(host1x_drm_init);
701
702static void __exit host1x_drm_exit(void)
703{
704 platform_driver_unregister(&tegra_gr3d_driver);
705 platform_driver_unregister(&tegra_gr2d_driver);
706 platform_driver_unregister(&tegra_hdmi_driver);
707 platform_driver_unregister(&tegra_dc_driver);
708 host1x_driver_unregister(&host1x_drm_driver);
709}
710module_exit(host1x_drm_exit);
711
712MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
713MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
714MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/host1x/drm/drm.h b/drivers/gpu/drm/tegra/drm.h
index 02ce020f2575..fdfe259ed7f8 100644
--- a/drivers/gpu/host1x/drm/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -10,14 +10,14 @@
10#ifndef HOST1X_DRM_H 10#ifndef HOST1X_DRM_H
11#define HOST1X_DRM_H 1 11#define HOST1X_DRM_H 1
12 12
13#include <uapi/drm/tegra_drm.h>
14#include <linux/host1x.h>
15
13#include <drm/drmP.h> 16#include <drm/drmP.h>
14#include <drm/drm_crtc_helper.h> 17#include <drm/drm_crtc_helper.h>
15#include <drm/drm_edid.h> 18#include <drm/drm_edid.h>
16#include <drm/drm_fb_helper.h> 19#include <drm/drm_fb_helper.h>
17#include <drm/drm_fixed.h> 20#include <drm/drm_fixed.h>
18#include <uapi/drm/tegra_drm.h>
19
20#include "host1x.h"
21 21
22struct tegra_fb { 22struct tegra_fb {
23 struct drm_framebuffer base; 23 struct drm_framebuffer base;
@@ -30,17 +30,8 @@ struct tegra_fbdev {
30 struct tegra_fb *fb; 30 struct tegra_fb *fb;
31}; 31};
32 32
33struct host1x_drm { 33struct tegra_drm {
34 struct drm_device *drm; 34 struct drm_device *drm;
35 struct device *dev;
36 void __iomem *regs;
37 struct clk *clk;
38 int syncpt;
39 int irq;
40
41 struct mutex drm_clients_lock;
42 struct list_head drm_clients;
43 struct list_head drm_active;
44 35
45 struct mutex clients_lock; 36 struct mutex clients_lock;
46 struct list_head clients; 37 struct list_head clients;
@@ -48,66 +39,60 @@ struct host1x_drm {
48 struct tegra_fbdev *fbdev; 39 struct tegra_fbdev *fbdev;
49}; 40};
50 41
51struct host1x_client; 42struct tegra_drm_client;
52 43
53struct host1x_drm_context { 44struct tegra_drm_context {
54 struct host1x_client *client; 45 struct tegra_drm_client *client;
55 struct host1x_channel *channel; 46 struct host1x_channel *channel;
56 struct list_head list; 47 struct list_head list;
57}; 48};
58 49
59struct host1x_client_ops { 50struct tegra_drm_client_ops {
60 int (*drm_init)(struct host1x_client *client, struct drm_device *drm); 51 int (*open_channel)(struct tegra_drm_client *client,
61 int (*drm_exit)(struct host1x_client *client); 52 struct tegra_drm_context *context);
62 int (*open_channel)(struct host1x_client *client, 53 void (*close_channel)(struct tegra_drm_context *context);
63 struct host1x_drm_context *context); 54 int (*is_addr_reg)(struct device *dev, u32 class, u32 offset);
64 void (*close_channel)(struct host1x_drm_context *context); 55 int (*submit)(struct tegra_drm_context *context,
65 int (*submit)(struct host1x_drm_context *context,
66 struct drm_tegra_submit *args, struct drm_device *drm, 56 struct drm_tegra_submit *args, struct drm_device *drm,
67 struct drm_file *file); 57 struct drm_file *file);
68}; 58};
69 59
70struct host1x_drm_file { 60int tegra_drm_submit(struct tegra_drm_context *context,
71 struct list_head contexts; 61 struct drm_tegra_submit *args, struct drm_device *drm,
72}; 62 struct drm_file *file);
73
74struct host1x_client {
75 struct host1x_drm *host1x;
76 struct device *dev;
77
78 const struct host1x_client_ops *ops;
79
80 enum host1x_class class;
81 struct host1x_channel *channel;
82
83 struct host1x_syncpt **syncpts;
84 unsigned int num_syncpts;
85 63
64struct tegra_drm_client {
65 struct host1x_client base;
86 struct list_head list; 66 struct list_head list;
67
68 const struct tegra_drm_client_ops *ops;
87}; 69};
88 70
89extern int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm); 71static inline struct tegra_drm_client *
90extern int host1x_drm_exit(struct host1x_drm *host1x); 72host1x_to_drm_client(struct host1x_client *client)
73{
74 return container_of(client, struct tegra_drm_client, base);
75}
76
77extern int tegra_drm_register_client(struct tegra_drm *tegra,
78 struct tegra_drm_client *client);
79extern int tegra_drm_unregister_client(struct tegra_drm *tegra,
80 struct tegra_drm_client *client);
91 81
92extern int host1x_register_client(struct host1x_drm *host1x, 82extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
93 struct host1x_client *client); 83extern int tegra_drm_exit(struct tegra_drm *tegra);
94extern int host1x_unregister_client(struct host1x_drm *host1x,
95 struct host1x_client *client);
96 84
97struct tegra_output; 85struct tegra_output;
98 86
99struct tegra_dc { 87struct tegra_dc {
100 struct host1x_client client; 88 struct host1x_client client;
101 spinlock_t lock;
102
103 struct host1x_drm *host1x;
104 struct device *dev; 89 struct device *dev;
90 spinlock_t lock;
105 91
106 struct drm_crtc base; 92 struct drm_crtc base;
107 int pipe; 93 int pipe;
108 94
109 struct clk *clk; 95 struct clk *clk;
110
111 void __iomem *regs; 96 void __iomem *regs;
112 int irq; 97 int irq;
113 98
@@ -123,7 +108,8 @@ struct tegra_dc {
123 struct drm_pending_vblank_event *event; 108 struct drm_pending_vblank_event *event;
124}; 109};
125 110
126static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client) 111static inline struct tegra_dc *
112host1x_client_to_dc(struct host1x_client *client)
127{ 113{
128 return container_of(client, struct tegra_dc, client); 114 return container_of(client, struct tegra_dc, client);
129} 115}
@@ -162,6 +148,8 @@ struct tegra_dc_window {
162 unsigned int format; 148 unsigned int format;
163 unsigned int stride[2]; 149 unsigned int stride[2];
164 unsigned long base[3]; 150 unsigned long base[3];
151 bool bottom_up;
152 bool tiled;
165}; 153};
166 154
167/* from dc.c */ 155/* from dc.c */
@@ -249,23 +237,34 @@ static inline int tegra_output_check_mode(struct tegra_output *output,
249 return output ? -ENOSYS : -EINVAL; 237 return output ? -ENOSYS : -EINVAL;
250} 238}
251 239
240/* from bus.c */
241int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device);
242void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device);
243
252/* from rgb.c */ 244/* from rgb.c */
253extern int tegra_dc_rgb_probe(struct tegra_dc *dc); 245extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
246extern int tegra_dc_rgb_remove(struct tegra_dc *dc);
254extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc); 247extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
255extern int tegra_dc_rgb_exit(struct tegra_dc *dc); 248extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
256 249
257/* from output.c */ 250/* from output.c */
258extern int tegra_output_parse_dt(struct tegra_output *output); 251extern int tegra_output_probe(struct tegra_output *output);
252extern int tegra_output_remove(struct tegra_output *output);
259extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output); 253extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
260extern int tegra_output_exit(struct tegra_output *output); 254extern int tegra_output_exit(struct tegra_output *output);
261 255
262/* from fb.c */ 256/* from fb.c */
263struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, 257struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
264 unsigned int index); 258 unsigned int index);
259bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
260bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
265extern int tegra_drm_fb_init(struct drm_device *drm); 261extern int tegra_drm_fb_init(struct drm_device *drm);
266extern void tegra_drm_fb_exit(struct drm_device *drm); 262extern void tegra_drm_fb_exit(struct drm_device *drm);
267extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev); 263extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
268 264
269extern struct drm_driver tegra_drm_driver; 265extern struct platform_driver tegra_dc_driver;
266extern struct platform_driver tegra_hdmi_driver;
267extern struct platform_driver tegra_gr2d_driver;
268extern struct platform_driver tegra_gr3d_driver;
270 269
271#endif /* HOST1X_DRM_H */ 270#endif /* HOST1X_DRM_H */
diff --git a/drivers/gpu/host1x/drm/fb.c b/drivers/gpu/drm/tegra/fb.c
index 979a3e32b78b..490f7719e317 100644
--- a/drivers/gpu/host1x/drm/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -10,8 +10,6 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
14
15#include "drm.h" 13#include "drm.h"
16#include "gem.h" 14#include "gem.h"
17 15
@@ -36,6 +34,26 @@ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
36 return fb->planes[index]; 34 return fb->planes[index];
37} 35}
38 36
37bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
38{
39 struct tegra_fb *fb = to_tegra_fb(framebuffer);
40
41 if (fb->planes[0]->flags & TEGRA_BO_BOTTOM_UP)
42 return true;
43
44 return false;
45}
46
47bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer)
48{
49 struct tegra_fb *fb = to_tegra_fb(framebuffer);
50
51 if (fb->planes[0]->flags & TEGRA_BO_TILED)
52 return true;
53
54 return false;
55}
56
39static void tegra_fb_destroy(struct drm_framebuffer *framebuffer) 57static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
40{ 58{
41 struct tegra_fb *fb = to_tegra_fb(framebuffer); 59 struct tegra_fb *fb = to_tegra_fb(framebuffer);
@@ -190,7 +208,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
190 208
191 size = cmd.pitches[0] * cmd.height; 209 size = cmd.pitches[0] * cmd.height;
192 210
193 bo = tegra_bo_create(drm, size); 211 bo = tegra_bo_create(drm, size, 0);
194 if (IS_ERR(bo)) 212 if (IS_ERR(bo))
195 return PTR_ERR(bo); 213 return PTR_ERR(bo);
196 214
@@ -323,10 +341,10 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
323 341
324static void tegra_fb_output_poll_changed(struct drm_device *drm) 342static void tegra_fb_output_poll_changed(struct drm_device *drm)
325{ 343{
326 struct host1x_drm *host1x = drm->dev_private; 344 struct tegra_drm *tegra = drm->dev_private;
327 345
328 if (host1x->fbdev) 346 if (tegra->fbdev)
329 drm_fb_helper_hotplug_event(&host1x->fbdev->base); 347 drm_fb_helper_hotplug_event(&tegra->fbdev->base);
330} 348}
331 349
332static const struct drm_mode_config_funcs tegra_drm_mode_funcs = { 350static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
@@ -336,7 +354,7 @@ static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
336 354
337int tegra_drm_fb_init(struct drm_device *drm) 355int tegra_drm_fb_init(struct drm_device *drm)
338{ 356{
339 struct host1x_drm *host1x = drm->dev_private; 357 struct tegra_drm *tegra = drm->dev_private;
340 struct tegra_fbdev *fbdev; 358 struct tegra_fbdev *fbdev;
341 359
342 drm->mode_config.min_width = 0; 360 drm->mode_config.min_width = 0;
@@ -352,16 +370,16 @@ int tegra_drm_fb_init(struct drm_device *drm)
352 if (IS_ERR(fbdev)) 370 if (IS_ERR(fbdev))
353 return PTR_ERR(fbdev); 371 return PTR_ERR(fbdev);
354 372
355 host1x->fbdev = fbdev; 373 tegra->fbdev = fbdev;
356 374
357 return 0; 375 return 0;
358} 376}
359 377
360void tegra_drm_fb_exit(struct drm_device *drm) 378void tegra_drm_fb_exit(struct drm_device *drm)
361{ 379{
362 struct host1x_drm *host1x = drm->dev_private; 380 struct tegra_drm *tegra = drm->dev_private;
363 381
364 tegra_fbdev_free(host1x->fbdev); 382 tegra_fbdev_free(tegra->fbdev);
365} 383}
366 384
367void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev) 385void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/drm/tegra/gem.c
index 59623de4ee15..28a9cbc07ab9 100644
--- a/drivers/gpu/host1x/drm/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -18,25 +18,18 @@
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 */ 19 */
20 20
21#include <linux/mm.h> 21#include <drm/tegra_drm.h>
22#include <linux/slab.h>
23#include <linux/mutex.h>
24#include <linux/export.h>
25#include <linux/dma-mapping.h>
26
27#include <drm/drmP.h>
28#include <drm/drm.h>
29 22
30#include "gem.h" 23#include "gem.h"
31 24
32static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo) 25static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
33{ 26{
34 return container_of(bo, struct tegra_bo, base); 27 return container_of(bo, struct tegra_bo, base);
35} 28}
36 29
37static void tegra_bo_put(struct host1x_bo *bo) 30static void tegra_bo_put(struct host1x_bo *bo)
38{ 31{
39 struct tegra_bo *obj = host1x_to_drm_bo(bo); 32 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
40 struct drm_device *drm = obj->gem.dev; 33 struct drm_device *drm = obj->gem.dev;
41 34
42 mutex_lock(&drm->struct_mutex); 35 mutex_lock(&drm->struct_mutex);
@@ -46,7 +39,7 @@ static void tegra_bo_put(struct host1x_bo *bo)
46 39
47static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt) 40static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
48{ 41{
49 struct tegra_bo *obj = host1x_to_drm_bo(bo); 42 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
50 43
51 return obj->paddr; 44 return obj->paddr;
52} 45}
@@ -57,7 +50,7 @@ static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
57 50
58static void *tegra_bo_mmap(struct host1x_bo *bo) 51static void *tegra_bo_mmap(struct host1x_bo *bo)
59{ 52{
60 struct tegra_bo *obj = host1x_to_drm_bo(bo); 53 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
61 54
62 return obj->vaddr; 55 return obj->vaddr;
63} 56}
@@ -68,7 +61,7 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
68 61
69static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page) 62static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
70{ 63{
71 struct tegra_bo *obj = host1x_to_drm_bo(bo); 64 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
72 65
73 return obj->vaddr + page * PAGE_SIZE; 66 return obj->vaddr + page * PAGE_SIZE;
74} 67}
@@ -80,7 +73,7 @@ static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
80 73
81static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo) 74static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
82{ 75{
83 struct tegra_bo *obj = host1x_to_drm_bo(bo); 76 struct tegra_bo *obj = host1x_to_tegra_bo(bo);
84 struct drm_device *drm = obj->gem.dev; 77 struct drm_device *drm = obj->gem.dev;
85 78
86 mutex_lock(&drm->struct_mutex); 79 mutex_lock(&drm->struct_mutex);
@@ -106,7 +99,8 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
106 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr); 99 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
107} 100}
108 101
109struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size) 102struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
103 unsigned long flags)
110{ 104{
111 struct tegra_bo *bo; 105 struct tegra_bo *bo;
112 int err; 106 int err;
@@ -135,6 +129,12 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
135 if (err) 129 if (err)
136 goto err_mmap; 130 goto err_mmap;
137 131
132 if (flags & DRM_TEGRA_GEM_CREATE_TILED)
133 bo->flags |= TEGRA_BO_TILED;
134
135 if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
136 bo->flags |= TEGRA_BO_BOTTOM_UP;
137
138 return bo; 138 return bo;
139 139
140err_mmap: 140err_mmap:
@@ -149,14 +149,15 @@ err_dma:
149} 149}
150 150
151struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, 151struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
152 struct drm_device *drm, 152 struct drm_device *drm,
153 unsigned int size, 153 unsigned int size,
154 unsigned int *handle) 154 unsigned long flags,
155 unsigned int *handle)
155{ 156{
156 struct tegra_bo *bo; 157 struct tegra_bo *bo;
157 int ret; 158 int ret;
158 159
159 bo = tegra_bo_create(drm, size); 160 bo = tegra_bo_create(drm, size, flags);
160 if (IS_ERR(bo)) 161 if (IS_ERR(bo))
161 return bo; 162 return bo;
162 163
@@ -178,7 +179,6 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
178 struct tegra_bo *bo = to_tegra_bo(gem); 179 struct tegra_bo *bo = to_tegra_bo(gem);
179 180
180 drm_gem_free_mmap_offset(gem); 181 drm_gem_free_mmap_offset(gem);
181
182 drm_gem_object_release(gem); 182 drm_gem_object_release(gem);
183 tegra_bo_destroy(gem->dev, bo); 183 tegra_bo_destroy(gem->dev, bo);
184 184
@@ -197,8 +197,8 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
197 if (args->size < args->pitch * args->height) 197 if (args->size < args->pitch * args->height)
198 args->size = args->pitch * args->height; 198 args->size = args->pitch * args->height;
199 199
200 bo = tegra_bo_create_with_handle(file, drm, args->size, 200 bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
201 &args->handle); 201 &args->handle);
202 if (IS_ERR(bo)) 202 if (IS_ERR(bo))
203 return PTR_ERR(bo); 203 return PTR_ERR(bo);
204 204
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/drm/tegra/gem.h
index 492533a2dacb..7674000bf47d 100644
--- a/drivers/gpu/host1x/drm/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -19,14 +19,18 @@
19#ifndef __HOST1X_GEM_H 19#ifndef __HOST1X_GEM_H
20#define __HOST1X_GEM_H 20#define __HOST1X_GEM_H
21 21
22#include <linux/host1x.h>
23
22#include <drm/drm.h> 24#include <drm/drm.h>
23#include <drm/drmP.h> 25#include <drm/drmP.h>
24 26
25#include "host1x_bo.h" 27#define TEGRA_BO_TILED (1 << 0)
28#define TEGRA_BO_BOTTOM_UP (1 << 1)
26 29
27struct tegra_bo { 30struct tegra_bo {
28 struct drm_gem_object gem; 31 struct drm_gem_object gem;
29 struct host1x_bo base; 32 struct host1x_bo base;
33 unsigned long flags;
30 dma_addr_t paddr; 34 dma_addr_t paddr;
31 void *vaddr; 35 void *vaddr;
32}; 36};
@@ -38,11 +42,13 @@ static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
38 42
39extern const struct host1x_bo_ops tegra_bo_ops; 43extern const struct host1x_bo_ops tegra_bo_ops;
40 44
41struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size); 45struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
46 unsigned long flags);
42struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, 47struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
43 struct drm_device *drm, 48 struct drm_device *drm,
44 unsigned int size, 49 unsigned int size,
45 unsigned int *handle); 50 unsigned long flags,
51 unsigned int *handle);
46void tegra_bo_free_object(struct drm_gem_object *gem); 52void tegra_bo_free_object(struct drm_gem_object *gem);
47int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, 53int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
48 struct drm_mode_create_dumb *args); 54 struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
new file mode 100644
index 000000000000..7ec4259ffded
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -0,0 +1,227 @@
1/*
2 * Copyright (c) 2012-2013, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/clk.h>
18
19#include "drm.h"
20#include "gem.h"
21#include "gr2d.h"
22
23struct gr2d {
24 struct tegra_drm_client client;
25 struct host1x_channel *channel;
26 struct clk *clk;
27
28 DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS);
29};
30
31static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
32{
33 return container_of(client, struct gr2d, client);
34}
35
36static int gr2d_init(struct host1x_client *client)
37{
38 struct tegra_drm_client *drm = host1x_to_drm_client(client);
39 struct tegra_drm *tegra = dev_get_drvdata(client->parent);
40 unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
41 struct gr2d *gr2d = to_gr2d(drm);
42
43 gr2d->channel = host1x_channel_request(client->dev);
44 if (!gr2d->channel)
45 return -ENOMEM;
46
47 client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
48 if (!client->syncpts[0]) {
49 host1x_channel_free(gr2d->channel);
50 return -ENOMEM;
51 }
52
53 return tegra_drm_register_client(tegra, drm);
54}
55
56static int gr2d_exit(struct host1x_client *client)
57{
58 struct tegra_drm_client *drm = host1x_to_drm_client(client);
59 struct tegra_drm *tegra = dev_get_drvdata(client->parent);
60 struct gr2d *gr2d = to_gr2d(drm);
61 int err;
62
63 err = tegra_drm_unregister_client(tegra, drm);
64 if (err < 0)
65 return err;
66
67 host1x_syncpt_free(client->syncpts[0]);
68 host1x_channel_free(gr2d->channel);
69
70 return 0;
71}
72
73static const struct host1x_client_ops gr2d_client_ops = {
74 .init = gr2d_init,
75 .exit = gr2d_exit,
76};
77
78static int gr2d_open_channel(struct tegra_drm_client *client,
79 struct tegra_drm_context *context)
80{
81 struct gr2d *gr2d = to_gr2d(client);
82
83 context->channel = host1x_channel_get(gr2d->channel);
84 if (!context->channel)
85 return -ENOMEM;
86
87 return 0;
88}
89
90static void gr2d_close_channel(struct tegra_drm_context *context)
91{
92 host1x_channel_put(context->channel);
93}
94
95static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 offset)
96{
97 struct gr2d *gr2d = dev_get_drvdata(dev);
98
99 switch (class) {
100 case HOST1X_CLASS_HOST1X:
101 if (offset == 0x2b)
102 return 1;
103
104 break;
105
106 case HOST1X_CLASS_GR2D:
107 case HOST1X_CLASS_GR2D_SB:
108 if (offset >= GR2D_NUM_REGS)
109 break;
110
111 if (test_bit(offset, gr2d->addr_regs))
112 return 1;
113
114 break;
115 }
116
117 return 0;
118}
119
120static const struct tegra_drm_client_ops gr2d_ops = {
121 .open_channel = gr2d_open_channel,
122 .close_channel = gr2d_close_channel,
123 .is_addr_reg = gr2d_is_addr_reg,
124 .submit = tegra_drm_submit,
125};
126
127static const struct of_device_id gr2d_match[] = {
128 { .compatible = "nvidia,tegra30-gr2d" },
129 { .compatible = "nvidia,tegra20-gr2d" },
130 { },
131};
132
133static const u32 gr2d_addr_regs[] = {
134 GR2D_UA_BASE_ADDR,
135 GR2D_VA_BASE_ADDR,
136 GR2D_PAT_BASE_ADDR,
137 GR2D_DSTA_BASE_ADDR,
138 GR2D_DSTB_BASE_ADDR,
139 GR2D_DSTC_BASE_ADDR,
140 GR2D_SRCA_BASE_ADDR,
141 GR2D_SRCB_BASE_ADDR,
142 GR2D_SRC_BASE_ADDR_SB,
143 GR2D_DSTA_BASE_ADDR_SB,
144 GR2D_DSTB_BASE_ADDR_SB,
145 GR2D_UA_BASE_ADDR_SB,
146 GR2D_VA_BASE_ADDR_SB,
147};
148
149static int gr2d_probe(struct platform_device *pdev)
150{
151 struct device *dev = &pdev->dev;
152 struct host1x_syncpt **syncpts;
153 struct gr2d *gr2d;
154 unsigned int i;
155 int err;
156
157 gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
158 if (!gr2d)
159 return -ENOMEM;
160
161 syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
162 if (!syncpts)
163 return -ENOMEM;
164
165 gr2d->clk = devm_clk_get(dev, NULL);
166 if (IS_ERR(gr2d->clk)) {
167 dev_err(dev, "cannot get clock\n");
168 return PTR_ERR(gr2d->clk);
169 }
170
171 err = clk_prepare_enable(gr2d->clk);
172 if (err) {
173 dev_err(dev, "cannot turn on clock\n");
174 return err;
175 }
176
177 INIT_LIST_HEAD(&gr2d->client.base.list);
178 gr2d->client.base.ops = &gr2d_client_ops;
179 gr2d->client.base.dev = dev;
180 gr2d->client.base.class = HOST1X_CLASS_GR2D;
181 gr2d->client.base.syncpts = syncpts;
182 gr2d->client.base.num_syncpts = 1;
183
184 INIT_LIST_HEAD(&gr2d->client.list);
185 gr2d->client.ops = &gr2d_ops;
186
187 err = host1x_client_register(&gr2d->client.base);
188 if (err < 0) {
189 dev_err(dev, "failed to register host1x client: %d\n", err);
190 clk_disable_unprepare(gr2d->clk);
191 return err;
192 }
193
194 /* initialize address register map */
195 for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++)
196 set_bit(gr2d_addr_regs[i], gr2d->addr_regs);
197
198 platform_set_drvdata(pdev, gr2d);
199
200 return 0;
201}
202
203static int gr2d_remove(struct platform_device *pdev)
204{
205 struct gr2d *gr2d = platform_get_drvdata(pdev);
206 int err;
207
208 err = host1x_client_unregister(&gr2d->client.base);
209 if (err < 0) {
210 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
211 err);
212 return err;
213 }
214
215 clk_disable_unprepare(gr2d->clk);
216
217 return 0;
218}
219
220struct platform_driver tegra_gr2d_driver = {
221 .driver = {
222 .name = "tegra-gr2d",
223 .of_match_table = gr2d_match,
224 },
225 .probe = gr2d_probe,
226 .remove = gr2d_remove,
227};
diff --git a/drivers/gpu/drm/tegra/gr2d.h b/drivers/gpu/drm/tegra/gr2d.h
new file mode 100644
index 000000000000..4d7304fb015e
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr2d.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) 2013 NVIDIA Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef TEGRA_GR2D_H
10#define TEGRA_GR2D_H
11
12#define GR2D_UA_BASE_ADDR 0x1a
13#define GR2D_VA_BASE_ADDR 0x1b
14#define GR2D_PAT_BASE_ADDR 0x26
15#define GR2D_DSTA_BASE_ADDR 0x2b
16#define GR2D_DSTB_BASE_ADDR 0x2c
17#define GR2D_DSTC_BASE_ADDR 0x2d
18#define GR2D_SRCA_BASE_ADDR 0x31
19#define GR2D_SRCB_BASE_ADDR 0x32
20#define GR2D_SRC_BASE_ADDR_SB 0x48
21#define GR2D_DSTA_BASE_ADDR_SB 0x49
22#define GR2D_DSTB_BASE_ADDR_SB 0x4a
23#define GR2D_UA_BASE_ADDR_SB 0x4b
24#define GR2D_VA_BASE_ADDR_SB 0x4c
25
26#define GR2D_NUM_REGS 0x4d
27
28#endif
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
new file mode 100644
index 000000000000..4cec8f526af7
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -0,0 +1,338 @@
1/*
2 * Copyright (C) 2013 Avionic Design GmbH
3 * Copyright (C) 2013 NVIDIA Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/clk.h>
11#include <linux/host1x.h>
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/tegra-powergate.h>
15
16#include "drm.h"
17#include "gem.h"
18#include "gr3d.h"
19
20struct gr3d {
21 struct tegra_drm_client client;
22 struct host1x_channel *channel;
23 struct clk *clk_secondary;
24 struct clk *clk;
25
26 DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
27};
28
29static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
30{
31 return container_of(client, struct gr3d, client);
32}
33
34static int gr3d_init(struct host1x_client *client)
35{
36 struct tegra_drm_client *drm = host1x_to_drm_client(client);
37 struct tegra_drm *tegra = dev_get_drvdata(client->parent);
38 unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
39 struct gr3d *gr3d = to_gr3d(drm);
40
41 gr3d->channel = host1x_channel_request(client->dev);
42 if (!gr3d->channel)
43 return -ENOMEM;
44
45 client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
46 if (!client->syncpts[0]) {
47 host1x_channel_free(gr3d->channel);
48 return -ENOMEM;
49 }
50
51 return tegra_drm_register_client(tegra, drm);
52}
53
54static int gr3d_exit(struct host1x_client *client)
55{
56 struct tegra_drm_client *drm = host1x_to_drm_client(client);
57 struct tegra_drm *tegra = dev_get_drvdata(client->parent);
58 struct gr3d *gr3d = to_gr3d(drm);
59 int err;
60
61 err = tegra_drm_unregister_client(tegra, drm);
62 if (err < 0)
63 return err;
64
65 host1x_syncpt_free(client->syncpts[0]);
66 host1x_channel_free(gr3d->channel);
67
68 return 0;
69}
70
71static const struct host1x_client_ops gr3d_client_ops = {
72 .init = gr3d_init,
73 .exit = gr3d_exit,
74};
75
76static int gr3d_open_channel(struct tegra_drm_client *client,
77 struct tegra_drm_context *context)
78{
79 struct gr3d *gr3d = to_gr3d(client);
80
81 context->channel = host1x_channel_get(gr3d->channel);
82 if (!context->channel)
83 return -ENOMEM;
84
85 return 0;
86}
87
88static void gr3d_close_channel(struct tegra_drm_context *context)
89{
90 host1x_channel_put(context->channel);
91}
92
93static int gr3d_is_addr_reg(struct device *dev, u32 class, u32 offset)
94{
95 struct gr3d *gr3d = dev_get_drvdata(dev);
96
97 switch (class) {
98 case HOST1X_CLASS_HOST1X:
99 if (offset == 0x2b)
100 return 1;
101
102 break;
103
104 case HOST1X_CLASS_GR3D:
105 if (offset >= GR3D_NUM_REGS)
106 break;
107
108 if (test_bit(offset, gr3d->addr_regs))
109 return 1;
110
111 break;
112 }
113
114 return 0;
115}
116
117static const struct tegra_drm_client_ops gr3d_ops = {
118 .open_channel = gr3d_open_channel,
119 .close_channel = gr3d_close_channel,
120 .is_addr_reg = gr3d_is_addr_reg,
121 .submit = tegra_drm_submit,
122};
123
124static const struct of_device_id tegra_gr3d_match[] = {
125 { .compatible = "nvidia,tegra114-gr3d" },
126 { .compatible = "nvidia,tegra30-gr3d" },
127 { .compatible = "nvidia,tegra20-gr3d" },
128 { }
129};
130
131static const u32 gr3d_addr_regs[] = {
132 GR3D_IDX_ATTRIBUTE( 0),
133 GR3D_IDX_ATTRIBUTE( 1),
134 GR3D_IDX_ATTRIBUTE( 2),
135 GR3D_IDX_ATTRIBUTE( 3),
136 GR3D_IDX_ATTRIBUTE( 4),
137 GR3D_IDX_ATTRIBUTE( 5),
138 GR3D_IDX_ATTRIBUTE( 6),
139 GR3D_IDX_ATTRIBUTE( 7),
140 GR3D_IDX_ATTRIBUTE( 8),
141 GR3D_IDX_ATTRIBUTE( 9),
142 GR3D_IDX_ATTRIBUTE(10),
143 GR3D_IDX_ATTRIBUTE(11),
144 GR3D_IDX_ATTRIBUTE(12),
145 GR3D_IDX_ATTRIBUTE(13),
146 GR3D_IDX_ATTRIBUTE(14),
147 GR3D_IDX_ATTRIBUTE(15),
148 GR3D_IDX_INDEX_BASE,
149 GR3D_QR_ZTAG_ADDR,
150 GR3D_QR_CTAG_ADDR,
151 GR3D_QR_CZ_ADDR,
152 GR3D_TEX_TEX_ADDR( 0),
153 GR3D_TEX_TEX_ADDR( 1),
154 GR3D_TEX_TEX_ADDR( 2),
155 GR3D_TEX_TEX_ADDR( 3),
156 GR3D_TEX_TEX_ADDR( 4),
157 GR3D_TEX_TEX_ADDR( 5),
158 GR3D_TEX_TEX_ADDR( 6),
159 GR3D_TEX_TEX_ADDR( 7),
160 GR3D_TEX_TEX_ADDR( 8),
161 GR3D_TEX_TEX_ADDR( 9),
162 GR3D_TEX_TEX_ADDR(10),
163 GR3D_TEX_TEX_ADDR(11),
164 GR3D_TEX_TEX_ADDR(12),
165 GR3D_TEX_TEX_ADDR(13),
166 GR3D_TEX_TEX_ADDR(14),
167 GR3D_TEX_TEX_ADDR(15),
168 GR3D_DW_MEMORY_OUTPUT_ADDRESS,
169 GR3D_GLOBAL_SURFADDR( 0),
170 GR3D_GLOBAL_SURFADDR( 1),
171 GR3D_GLOBAL_SURFADDR( 2),
172 GR3D_GLOBAL_SURFADDR( 3),
173 GR3D_GLOBAL_SURFADDR( 4),
174 GR3D_GLOBAL_SURFADDR( 5),
175 GR3D_GLOBAL_SURFADDR( 6),
176 GR3D_GLOBAL_SURFADDR( 7),
177 GR3D_GLOBAL_SURFADDR( 8),
178 GR3D_GLOBAL_SURFADDR( 9),
179 GR3D_GLOBAL_SURFADDR(10),
180 GR3D_GLOBAL_SURFADDR(11),
181 GR3D_GLOBAL_SURFADDR(12),
182 GR3D_GLOBAL_SURFADDR(13),
183 GR3D_GLOBAL_SURFADDR(14),
184 GR3D_GLOBAL_SURFADDR(15),
185 GR3D_GLOBAL_SPILLSURFADDR,
186 GR3D_GLOBAL_SURFOVERADDR( 0),
187 GR3D_GLOBAL_SURFOVERADDR( 1),
188 GR3D_GLOBAL_SURFOVERADDR( 2),
189 GR3D_GLOBAL_SURFOVERADDR( 3),
190 GR3D_GLOBAL_SURFOVERADDR( 4),
191 GR3D_GLOBAL_SURFOVERADDR( 5),
192 GR3D_GLOBAL_SURFOVERADDR( 6),
193 GR3D_GLOBAL_SURFOVERADDR( 7),
194 GR3D_GLOBAL_SURFOVERADDR( 8),
195 GR3D_GLOBAL_SURFOVERADDR( 9),
196 GR3D_GLOBAL_SURFOVERADDR(10),
197 GR3D_GLOBAL_SURFOVERADDR(11),
198 GR3D_GLOBAL_SURFOVERADDR(12),
199 GR3D_GLOBAL_SURFOVERADDR(13),
200 GR3D_GLOBAL_SURFOVERADDR(14),
201 GR3D_GLOBAL_SURFOVERADDR(15),
202 GR3D_GLOBAL_SAMP01SURFADDR( 0),
203 GR3D_GLOBAL_SAMP01SURFADDR( 1),
204 GR3D_GLOBAL_SAMP01SURFADDR( 2),
205 GR3D_GLOBAL_SAMP01SURFADDR( 3),
206 GR3D_GLOBAL_SAMP01SURFADDR( 4),
207 GR3D_GLOBAL_SAMP01SURFADDR( 5),
208 GR3D_GLOBAL_SAMP01SURFADDR( 6),
209 GR3D_GLOBAL_SAMP01SURFADDR( 7),
210 GR3D_GLOBAL_SAMP01SURFADDR( 8),
211 GR3D_GLOBAL_SAMP01SURFADDR( 9),
212 GR3D_GLOBAL_SAMP01SURFADDR(10),
213 GR3D_GLOBAL_SAMP01SURFADDR(11),
214 GR3D_GLOBAL_SAMP01SURFADDR(12),
215 GR3D_GLOBAL_SAMP01SURFADDR(13),
216 GR3D_GLOBAL_SAMP01SURFADDR(14),
217 GR3D_GLOBAL_SAMP01SURFADDR(15),
218 GR3D_GLOBAL_SAMP23SURFADDR( 0),
219 GR3D_GLOBAL_SAMP23SURFADDR( 1),
220 GR3D_GLOBAL_SAMP23SURFADDR( 2),
221 GR3D_GLOBAL_SAMP23SURFADDR( 3),
222 GR3D_GLOBAL_SAMP23SURFADDR( 4),
223 GR3D_GLOBAL_SAMP23SURFADDR( 5),
224 GR3D_GLOBAL_SAMP23SURFADDR( 6),
225 GR3D_GLOBAL_SAMP23SURFADDR( 7),
226 GR3D_GLOBAL_SAMP23SURFADDR( 8),
227 GR3D_GLOBAL_SAMP23SURFADDR( 9),
228 GR3D_GLOBAL_SAMP23SURFADDR(10),
229 GR3D_GLOBAL_SAMP23SURFADDR(11),
230 GR3D_GLOBAL_SAMP23SURFADDR(12),
231 GR3D_GLOBAL_SAMP23SURFADDR(13),
232 GR3D_GLOBAL_SAMP23SURFADDR(14),
233 GR3D_GLOBAL_SAMP23SURFADDR(15),
234};
235
236static int gr3d_probe(struct platform_device *pdev)
237{
238 struct device_node *np = pdev->dev.of_node;
239 struct host1x_syncpt **syncpts;
240 struct gr3d *gr3d;
241 unsigned int i;
242 int err;
243
244 gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
245 if (!gr3d)
246 return -ENOMEM;
247
248 syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
249 if (!syncpts)
250 return -ENOMEM;
251
252 gr3d->clk = devm_clk_get(&pdev->dev, NULL);
253 if (IS_ERR(gr3d->clk)) {
254 dev_err(&pdev->dev, "cannot get clock\n");
255 return PTR_ERR(gr3d->clk);
256 }
257
258 if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) {
259 gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2");
260 if (IS_ERR(gr3d->clk)) {
261 dev_err(&pdev->dev, "cannot get secondary clock\n");
262 return PTR_ERR(gr3d->clk);
263 }
264 }
265
266 err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk);
267 if (err < 0) {
268 dev_err(&pdev->dev, "failed to power up 3D unit\n");
269 return err;
270 }
271
272 if (gr3d->clk_secondary) {
273 err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D1,
274 gr3d->clk_secondary);
275 if (err < 0) {
276 dev_err(&pdev->dev,
277 "failed to power up secondary 3D unit\n");
278 return err;
279 }
280 }
281
282 INIT_LIST_HEAD(&gr3d->client.base.list);
283 gr3d->client.base.ops = &gr3d_client_ops;
284 gr3d->client.base.dev = &pdev->dev;
285 gr3d->client.base.class = HOST1X_CLASS_GR3D;
286 gr3d->client.base.syncpts = syncpts;
287 gr3d->client.base.num_syncpts = 1;
288
289 INIT_LIST_HEAD(&gr3d->client.list);
290 gr3d->client.ops = &gr3d_ops;
291
292 err = host1x_client_register(&gr3d->client.base);
293 if (err < 0) {
294 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
295 err);
296 return err;
297 }
298
299 /* initialize address register map */
300 for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++)
301 set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
302
303 platform_set_drvdata(pdev, gr3d);
304
305 return 0;
306}
307
308static int gr3d_remove(struct platform_device *pdev)
309{
310 struct gr3d *gr3d = platform_get_drvdata(pdev);
311 int err;
312
313 err = host1x_client_unregister(&gr3d->client.base);
314 if (err < 0) {
315 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
316 err);
317 return err;
318 }
319
320 if (gr3d->clk_secondary) {
321 tegra_powergate_power_off(TEGRA_POWERGATE_3D1);
322 clk_disable_unprepare(gr3d->clk_secondary);
323 }
324
325 tegra_powergate_power_off(TEGRA_POWERGATE_3D);
326 clk_disable_unprepare(gr3d->clk);
327
328 return 0;
329}
330
331struct platform_driver tegra_gr3d_driver = {
332 .driver = {
333 .name = "tegra-gr3d",
334 .of_match_table = tegra_gr3d_match,
335 },
336 .probe = gr3d_probe,
337 .remove = gr3d_remove,
338};
diff --git a/drivers/gpu/drm/tegra/gr3d.h b/drivers/gpu/drm/tegra/gr3d.h
new file mode 100644
index 000000000000..0c30a1351c83
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr3d.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright (C) 2013 NVIDIA Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef TEGRA_GR3D_H
10#define TEGRA_GR3D_H
11
12#define GR3D_IDX_ATTRIBUTE(x) (0x100 + (x) * 2)
13#define GR3D_IDX_INDEX_BASE 0x121
14#define GR3D_QR_ZTAG_ADDR 0x415
15#define GR3D_QR_CTAG_ADDR 0x417
16#define GR3D_QR_CZ_ADDR 0x419
17#define GR3D_TEX_TEX_ADDR(x) (0x710 + (x))
18#define GR3D_DW_MEMORY_OUTPUT_ADDRESS 0x904
19#define GR3D_GLOBAL_SURFADDR(x) (0xe00 + (x))
20#define GR3D_GLOBAL_SPILLSURFADDR 0xe2a
21#define GR3D_GLOBAL_SURFOVERADDR(x) (0xe30 + (x))
22#define GR3D_GLOBAL_SAMP01SURFADDR(x) (0xe50 + (x))
23#define GR3D_GLOBAL_SAMP23SURFADDR(x) (0xe60 + (x))
24
25#define GR3D_NUM_REGS 0xe88
26
27#endif
diff --git a/drivers/gpu/host1x/drm/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 644d95c7d489..0cd9bc2056e8 100644
--- a/drivers/gpu/host1x/drm/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -8,21 +8,33 @@
8 */ 8 */
9 9
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/clk/tegra.h>
11#include <linux/debugfs.h> 12#include <linux/debugfs.h>
12#include <linux/gpio.h>
13#include <linux/hdmi.h> 13#include <linux/hdmi.h>
14#include <linux/module.h>
15#include <linux/of.h>
16#include <linux/platform_device.h>
17#include <linux/regulator/consumer.h> 14#include <linux/regulator/consumer.h>
18#include <linux/clk/tegra.h>
19
20#include <drm/drm_edid.h>
21 15
22#include "hdmi.h" 16#include "hdmi.h"
23#include "drm.h" 17#include "drm.h"
24#include "dc.h" 18#include "dc.h"
25#include "host1x_client.h" 19
20struct tmds_config {
21 unsigned int pclk;
22 u32 pll0;
23 u32 pll1;
24 u32 pe_current;
25 u32 drive_current;
26 u32 peak_current;
27};
28
29struct tegra_hdmi_config {
30 const struct tmds_config *tmds;
31 unsigned int num_tmds;
32
33 unsigned long fuse_override_offset;
34 unsigned long fuse_override_value;
35
36 bool has_sor_io_peak_current;
37};
26 38
27struct tegra_hdmi { 39struct tegra_hdmi {
28 struct host1x_client client; 40 struct host1x_client client;
@@ -38,6 +50,8 @@ struct tegra_hdmi {
38 struct clk *clk_parent; 50 struct clk *clk_parent;
39 struct clk *clk; 51 struct clk *clk;
40 52
53 const struct tegra_hdmi_config *config;
54
41 unsigned int audio_source; 55 unsigned int audio_source;
42 unsigned int audio_freq; 56 unsigned int audio_freq;
43 bool stereo; 57 bool stereo;
@@ -143,15 +157,7 @@ static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
143 { 0, 0, 0, 0 }, 157 { 0, 0, 0, 0 },
144}; 158};
145 159
146struct tmds_config { 160static const struct tmds_config tegra20_tmds_config[] = {
147 unsigned int pclk;
148 u32 pll0;
149 u32 pll1;
150 u32 pe_current;
151 u32 drive_current;
152};
153
154static const struct tmds_config tegra2_tmds_config[] = {
155 { /* slow pixel clock modes */ 161 { /* slow pixel clock modes */
156 .pclk = 27000000, 162 .pclk = 27000000,
157 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | 163 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
@@ -184,7 +190,7 @@ static const struct tmds_config tegra2_tmds_config[] = {
184 }, 190 },
185}; 191};
186 192
187static const struct tmds_config tegra3_tmds_config[] = { 193static const struct tmds_config tegra30_tmds_config[] = {
188 { /* 480p modes */ 194 { /* 480p modes */
189 .pclk = 27000000, 195 .pclk = 27000000,
190 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | 196 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
@@ -230,6 +236,85 @@ static const struct tmds_config tegra3_tmds_config[] = {
230 }, 236 },
231}; 237};
232 238
239static const struct tmds_config tegra114_tmds_config[] = {
240 { /* 480p/576p / 25.2MHz/27MHz modes */
241 .pclk = 27000000,
242 .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
243 SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL,
244 .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0),
245 .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
246 PE_CURRENT1(PE_CURRENT_0_mA_T114) |
247 PE_CURRENT2(PE_CURRENT_0_mA_T114) |
248 PE_CURRENT3(PE_CURRENT_0_mA_T114),
249 .drive_current =
250 DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
251 DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
252 DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
253 DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
254 .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
255 PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
256 PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
257 PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
258 }, { /* 720p / 74.25MHz modes */
259 .pclk = 74250000,
260 .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
261 SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL,
262 .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
263 SOR_PLL_TMDS_TERMADJ(0),
264 .pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) |
265 PE_CURRENT1(PE_CURRENT_15_mA_T114) |
266 PE_CURRENT2(PE_CURRENT_15_mA_T114) |
267 PE_CURRENT3(PE_CURRENT_15_mA_T114),
268 .drive_current =
269 DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
270 DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
271 DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
272 DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
273 .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
274 PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
275 PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
276 PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
277 }, { /* 1080p / 148.5MHz modes */
278 .pclk = 148500000,
279 .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
280 SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL,
281 .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
282 SOR_PLL_TMDS_TERMADJ(0),
283 .pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) |
284 PE_CURRENT1(PE_CURRENT_10_mA_T114) |
285 PE_CURRENT2(PE_CURRENT_10_mA_T114) |
286 PE_CURRENT3(PE_CURRENT_10_mA_T114),
287 .drive_current =
288 DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) |
289 DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) |
290 DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) |
291 DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114),
292 .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
293 PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
294 PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
295 PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
296 }, { /* 225/297MHz modes */
297 .pclk = UINT_MAX,
298 .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
299 SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL,
300 .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7)
301 | SOR_PLL_TMDS_TERM_ENABLE,
302 .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
303 PE_CURRENT1(PE_CURRENT_0_mA_T114) |
304 PE_CURRENT2(PE_CURRENT_0_mA_T114) |
305 PE_CURRENT3(PE_CURRENT_0_mA_T114),
306 .drive_current =
307 DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) |
308 DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) |
309 DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) |
310 DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114),
311 .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) |
312 PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) |
313 PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) |
314 PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA),
315 },
316};
317
233static const struct tegra_hdmi_audio_config * 318static const struct tegra_hdmi_audio_config *
234tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk) 319tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
235{ 320{
@@ -511,7 +596,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
511 596
512 err = hdmi_audio_infoframe_init(&frame); 597 err = hdmi_audio_infoframe_init(&frame);
513 if (err < 0) { 598 if (err < 0) {
514 dev_err(hdmi->dev, "failed to initialize audio infoframe: %d\n", 599 dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
515 err); 600 err);
516 return; 601 return;
517 } 602 }
@@ -531,7 +616,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
531 * contain 7 bytes. Including the 3 byte header only the first 10 616 * contain 7 bytes. Including the 3 byte header only the first 10
532 * bytes can be programmed. 617 * bytes can be programmed.
533 */ 618 */
534 tegra_hdmi_write_infopack(hdmi, buffer, min(10, err)); 619 tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
535 620
536 tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, 621 tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
537 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); 622 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
@@ -577,8 +662,28 @@ static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
577 tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1); 662 tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
578 tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT); 663 tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
579 664
580 value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE; 665 tegra_hdmi_writel(hdmi, tmds->drive_current,
581 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT); 666 HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
667
668 value = tegra_hdmi_readl(hdmi, hdmi->config->fuse_override_offset);
669 value |= hdmi->config->fuse_override_value;
670 tegra_hdmi_writel(hdmi, value, hdmi->config->fuse_override_offset);
671
672 if (hdmi->config->has_sor_io_peak_current)
673 tegra_hdmi_writel(hdmi, tmds->peak_current,
674 HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
675}
676
677static bool tegra_output_is_hdmi(struct tegra_output *output)
678{
679 struct edid *edid;
680
681 if (!output->connector.edid_blob_ptr)
682 return false;
683
684 edid = (struct edid *)output->connector.edid_blob_ptr->data;
685
686 return drm_detect_hdmi_monitor(edid);
582} 687}
583 688
584static int tegra_output_hdmi_enable(struct tegra_output *output) 689static int tegra_output_hdmi_enable(struct tegra_output *output)
@@ -589,23 +694,17 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
589 struct tegra_hdmi *hdmi = to_hdmi(output); 694 struct tegra_hdmi *hdmi = to_hdmi(output);
590 struct device_node *node = hdmi->dev->of_node; 695 struct device_node *node = hdmi->dev->of_node;
591 unsigned int pulse_start, div82, pclk; 696 unsigned int pulse_start, div82, pclk;
592 const struct tmds_config *tmds;
593 unsigned int num_tmds;
594 unsigned long value; 697 unsigned long value;
595 int retries = 1000; 698 int retries = 1000;
596 int err; 699 int err;
597 700
701 hdmi->dvi = !tegra_output_is_hdmi(output);
702
598 pclk = mode->clock * 1000; 703 pclk = mode->clock * 1000;
599 h_sync_width = mode->hsync_end - mode->hsync_start; 704 h_sync_width = mode->hsync_end - mode->hsync_start;
600 h_back_porch = mode->htotal - mode->hsync_end; 705 h_back_porch = mode->htotal - mode->hsync_end;
601 h_front_porch = mode->hsync_start - mode->hdisplay; 706 h_front_porch = mode->hsync_start - mode->hdisplay;
602 707
603 err = regulator_enable(hdmi->vdd);
604 if (err < 0) {
605 dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
606 return err;
607 }
608
609 err = regulator_enable(hdmi->pll); 708 err = regulator_enable(hdmi->pll);
610 if (err < 0) { 709 if (err < 0) {
611 dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err); 710 dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
@@ -710,17 +809,9 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
710 tegra_hdmi_setup_stereo_infoframe(hdmi); 809 tegra_hdmi_setup_stereo_infoframe(hdmi);
711 810
712 /* TMDS CONFIG */ 811 /* TMDS CONFIG */
713 if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { 812 for (i = 0; i < hdmi->config->num_tmds; i++) {
714 num_tmds = ARRAY_SIZE(tegra3_tmds_config); 813 if (pclk <= hdmi->config->tmds[i].pclk) {
715 tmds = tegra3_tmds_config; 814 tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
716 } else {
717 num_tmds = ARRAY_SIZE(tegra2_tmds_config);
718 tmds = tegra2_tmds_config;
719 }
720
721 for (i = 0; i < num_tmds; i++) {
722 if (pclk <= tmds[i].pclk) {
723 tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
724 break; 815 break;
725 } 816 }
726 } 817 }
@@ -824,7 +915,6 @@ static int tegra_output_hdmi_disable(struct tegra_output *output)
824 tegra_periph_reset_assert(hdmi->clk); 915 tegra_periph_reset_assert(hdmi->clk);
825 clk_disable(hdmi->clk); 916 clk_disable(hdmi->clk);
826 regulator_disable(hdmi->pll); 917 regulator_disable(hdmi->pll);
827 regulator_disable(hdmi->vdd);
828 918
829 return 0; 919 return 0;
830} 920}
@@ -1055,6 +1145,7 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
1055 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0); 1145 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
1056 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR); 1146 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
1057 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE); 1147 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
1148 DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
1058 1149
1059#undef DUMP_REG 1150#undef DUMP_REG
1060 1151
@@ -1122,24 +1213,31 @@ static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
1122 return 0; 1213 return 0;
1123} 1214}
1124 1215
1125static int tegra_hdmi_drm_init(struct host1x_client *client, 1216static int tegra_hdmi_init(struct host1x_client *client)
1126 struct drm_device *drm)
1127{ 1217{
1218 struct tegra_drm *tegra = dev_get_drvdata(client->parent);
1128 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); 1219 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
1129 int err; 1220 int err;
1130 1221
1222 err = regulator_enable(hdmi->vdd);
1223 if (err < 0) {
1224 dev_err(client->dev, "failed to enable VDD regulator: %d\n",
1225 err);
1226 return err;
1227 }
1228
1131 hdmi->output.type = TEGRA_OUTPUT_HDMI; 1229 hdmi->output.type = TEGRA_OUTPUT_HDMI;
1132 hdmi->output.dev = client->dev; 1230 hdmi->output.dev = client->dev;
1133 hdmi->output.ops = &hdmi_ops; 1231 hdmi->output.ops = &hdmi_ops;
1134 1232
1135 err = tegra_output_init(drm, &hdmi->output); 1233 err = tegra_output_init(tegra->drm, &hdmi->output);
1136 if (err < 0) { 1234 if (err < 0) {
1137 dev_err(client->dev, "output setup failed: %d\n", err); 1235 dev_err(client->dev, "output setup failed: %d\n", err);
1138 return err; 1236 return err;
1139 } 1237 }
1140 1238
1141 if (IS_ENABLED(CONFIG_DEBUG_FS)) { 1239 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1142 err = tegra_hdmi_debugfs_init(hdmi, drm->primary); 1240 err = tegra_hdmi_debugfs_init(hdmi, tegra->drm->primary);
1143 if (err < 0) 1241 if (err < 0)
1144 dev_err(client->dev, "debugfs setup failed: %d\n", err); 1242 dev_err(client->dev, "debugfs setup failed: %d\n", err);
1145 } 1243 }
@@ -1147,7 +1245,7 @@ static int tegra_hdmi_drm_init(struct host1x_client *client,
1147 return 0; 1245 return 0;
1148} 1246}
1149 1247
1150static int tegra_hdmi_drm_exit(struct host1x_client *client) 1248static int tegra_hdmi_exit(struct host1x_client *client)
1151{ 1249{
1152 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); 1250 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
1153 int err; 1251 int err;
@@ -1171,25 +1269,63 @@ static int tegra_hdmi_drm_exit(struct host1x_client *client)
1171 return err; 1269 return err;
1172 } 1270 }
1173 1271
1272 regulator_disable(hdmi->vdd);
1273
1174 return 0; 1274 return 0;
1175} 1275}
1176 1276
1177static const struct host1x_client_ops hdmi_client_ops = { 1277static const struct host1x_client_ops hdmi_client_ops = {
1178 .drm_init = tegra_hdmi_drm_init, 1278 .init = tegra_hdmi_init,
1179 .drm_exit = tegra_hdmi_drm_exit, 1279 .exit = tegra_hdmi_exit,
1280};
1281
1282static const struct tegra_hdmi_config tegra20_hdmi_config = {
1283 .tmds = tegra20_tmds_config,
1284 .num_tmds = ARRAY_SIZE(tegra20_tmds_config),
1285 .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
1286 .fuse_override_value = 1 << 31,
1287 .has_sor_io_peak_current = false,
1288};
1289
1290static const struct tegra_hdmi_config tegra30_hdmi_config = {
1291 .tmds = tegra30_tmds_config,
1292 .num_tmds = ARRAY_SIZE(tegra30_tmds_config),
1293 .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
1294 .fuse_override_value = 1 << 31,
1295 .has_sor_io_peak_current = false,
1296};
1297
1298static const struct tegra_hdmi_config tegra114_hdmi_config = {
1299 .tmds = tegra114_tmds_config,
1300 .num_tmds = ARRAY_SIZE(tegra114_tmds_config),
1301 .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
1302 .fuse_override_value = 1 << 31,
1303 .has_sor_io_peak_current = true,
1304};
1305
1306static const struct of_device_id tegra_hdmi_of_match[] = {
1307 { .compatible = "nvidia,tegra114-hdmi", .data = &tegra114_hdmi_config },
1308 { .compatible = "nvidia,tegra30-hdmi", .data = &tegra30_hdmi_config },
1309 { .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
1310 { },
1180}; 1311};
1181 1312
1182static int tegra_hdmi_probe(struct platform_device *pdev) 1313static int tegra_hdmi_probe(struct platform_device *pdev)
1183{ 1314{
1184 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent); 1315 const struct of_device_id *match;
1185 struct tegra_hdmi *hdmi; 1316 struct tegra_hdmi *hdmi;
1186 struct resource *regs; 1317 struct resource *regs;
1187 int err; 1318 int err;
1188 1319
1320 match = of_match_node(tegra_hdmi_of_match, pdev->dev.of_node);
1321 if (!match)
1322 return -ENODEV;
1323
1189 hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); 1324 hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
1190 if (!hdmi) 1325 if (!hdmi)
1191 return -ENOMEM; 1326 return -ENOMEM;
1192 1327
1328 hdmi->config = match->data;
1193 hdmi->dev = &pdev->dev; 1329 hdmi->dev = &pdev->dev;
1194 hdmi->audio_source = AUTO; 1330 hdmi->audio_source = AUTO;
1195 hdmi->audio_freq = 44100; 1331 hdmi->audio_freq = 44100;
@@ -1234,7 +1370,7 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
1234 1370
1235 hdmi->output.dev = &pdev->dev; 1371 hdmi->output.dev = &pdev->dev;
1236 1372
1237 err = tegra_output_parse_dt(&hdmi->output); 1373 err = tegra_output_probe(&hdmi->output);
1238 if (err < 0) 1374 if (err < 0)
1239 return err; 1375 return err;
1240 1376
@@ -1252,11 +1388,11 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
1252 1388
1253 hdmi->irq = err; 1389 hdmi->irq = err;
1254 1390
1255 hdmi->client.ops = &hdmi_client_ops;
1256 INIT_LIST_HEAD(&hdmi->client.list); 1391 INIT_LIST_HEAD(&hdmi->client.list);
1392 hdmi->client.ops = &hdmi_client_ops;
1257 hdmi->client.dev = &pdev->dev; 1393 hdmi->client.dev = &pdev->dev;
1258 1394
1259 err = host1x_register_client(host1x, &hdmi->client); 1395 err = host1x_client_register(&hdmi->client);
1260 if (err < 0) { 1396 if (err < 0) {
1261 dev_err(&pdev->dev, "failed to register host1x client: %d\n", 1397 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1262 err); 1398 err);
@@ -1270,29 +1406,28 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
1270 1406
1271static int tegra_hdmi_remove(struct platform_device *pdev) 1407static int tegra_hdmi_remove(struct platform_device *pdev)
1272{ 1408{
1273 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
1274 struct tegra_hdmi *hdmi = platform_get_drvdata(pdev); 1409 struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
1275 int err; 1410 int err;
1276 1411
1277 err = host1x_unregister_client(host1x, &hdmi->client); 1412 err = host1x_client_unregister(&hdmi->client);
1278 if (err < 0) { 1413 if (err < 0) {
1279 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", 1414 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
1280 err); 1415 err);
1281 return err; 1416 return err;
1282 } 1417 }
1283 1418
1419 err = tegra_output_remove(&hdmi->output);
1420 if (err < 0) {
1421 dev_err(&pdev->dev, "failed to remove output: %d\n", err);
1422 return err;
1423 }
1424
1284 clk_unprepare(hdmi->clk_parent); 1425 clk_unprepare(hdmi->clk_parent);
1285 clk_unprepare(hdmi->clk); 1426 clk_unprepare(hdmi->clk);
1286 1427
1287 return 0; 1428 return 0;
1288} 1429}
1289 1430
1290static struct of_device_id tegra_hdmi_of_match[] = {
1291 { .compatible = "nvidia,tegra30-hdmi", },
1292 { .compatible = "nvidia,tegra20-hdmi", },
1293 { },
1294};
1295
1296struct platform_driver tegra_hdmi_driver = { 1431struct platform_driver tegra_hdmi_driver = {
1297 .driver = { 1432 .driver = {
1298 .name = "tegra-hdmi", 1433 .name = "tegra-hdmi",
diff --git a/drivers/gpu/host1x/drm/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index 52ac36e08ccb..0aebc485f7fa 100644
--- a/drivers/gpu/host1x/drm/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -233,7 +233,10 @@
233#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8) 233#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8)
234#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16) 234#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16)
235#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24) 235#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24)
236#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31) 236#define DRIVE_CURRENT_LANE0_T114(x) (((x) & 0x7f) << 0)
237#define DRIVE_CURRENT_LANE1_T114(x) (((x) & 0x7f) << 8)
238#define DRIVE_CURRENT_LANE2_T114(x) (((x) & 0x7f) << 16)
239#define DRIVE_CURRENT_LANE3_T114(x) (((x) & 0x7f) << 24)
237 240
238#define DRIVE_CURRENT_1_500_mA 0x00 241#define DRIVE_CURRENT_1_500_mA 0x00
239#define DRIVE_CURRENT_1_875_mA 0x01 242#define DRIVE_CURRENT_1_875_mA 0x01
@@ -299,6 +302,79 @@
299#define DRIVE_CURRENT_24_375_mA 0x3d 302#define DRIVE_CURRENT_24_375_mA 0x3d
300#define DRIVE_CURRENT_24_750_mA 0x3e 303#define DRIVE_CURRENT_24_750_mA 0x3e
301 304
305#define DRIVE_CURRENT_0_000_mA_T114 0x00
306#define DRIVE_CURRENT_0_400_mA_T114 0x01
307#define DRIVE_CURRENT_0_800_mA_T114 0x02
308#define DRIVE_CURRENT_1_200_mA_T114 0x03
309#define DRIVE_CURRENT_1_600_mA_T114 0x04
310#define DRIVE_CURRENT_2_000_mA_T114 0x05
311#define DRIVE_CURRENT_2_400_mA_T114 0x06
312#define DRIVE_CURRENT_2_800_mA_T114 0x07
313#define DRIVE_CURRENT_3_200_mA_T114 0x08
314#define DRIVE_CURRENT_3_600_mA_T114 0x09
315#define DRIVE_CURRENT_4_000_mA_T114 0x0a
316#define DRIVE_CURRENT_4_400_mA_T114 0x0b
317#define DRIVE_CURRENT_4_800_mA_T114 0x0c
318#define DRIVE_CURRENT_5_200_mA_T114 0x0d
319#define DRIVE_CURRENT_5_600_mA_T114 0x0e
320#define DRIVE_CURRENT_6_000_mA_T114 0x0f
321#define DRIVE_CURRENT_6_400_mA_T114 0x10
322#define DRIVE_CURRENT_6_800_mA_T114 0x11
323#define DRIVE_CURRENT_7_200_mA_T114 0x12
324#define DRIVE_CURRENT_7_600_mA_T114 0x13
325#define DRIVE_CURRENT_8_000_mA_T114 0x14
326#define DRIVE_CURRENT_8_400_mA_T114 0x15
327#define DRIVE_CURRENT_8_800_mA_T114 0x16
328#define DRIVE_CURRENT_9_200_mA_T114 0x17
329#define DRIVE_CURRENT_9_600_mA_T114 0x18
330#define DRIVE_CURRENT_10_000_mA_T114 0x19
331#define DRIVE_CURRENT_10_400_mA_T114 0x1a
332#define DRIVE_CURRENT_10_800_mA_T114 0x1b
333#define DRIVE_CURRENT_11_200_mA_T114 0x1c
334#define DRIVE_CURRENT_11_600_mA_T114 0x1d
335#define DRIVE_CURRENT_12_000_mA_T114 0x1e
336#define DRIVE_CURRENT_12_400_mA_T114 0x1f
337#define DRIVE_CURRENT_12_800_mA_T114 0x20
338#define DRIVE_CURRENT_13_200_mA_T114 0x21
339#define DRIVE_CURRENT_13_600_mA_T114 0x22
340#define DRIVE_CURRENT_14_000_mA_T114 0x23
341#define DRIVE_CURRENT_14_400_mA_T114 0x24
342#define DRIVE_CURRENT_14_800_mA_T114 0x25
343#define DRIVE_CURRENT_15_200_mA_T114 0x26
344#define DRIVE_CURRENT_15_600_mA_T114 0x27
345#define DRIVE_CURRENT_16_000_mA_T114 0x28
346#define DRIVE_CURRENT_16_400_mA_T114 0x29
347#define DRIVE_CURRENT_16_800_mA_T114 0x2a
348#define DRIVE_CURRENT_17_200_mA_T114 0x2b
349#define DRIVE_CURRENT_17_600_mA_T114 0x2c
350#define DRIVE_CURRENT_18_000_mA_T114 0x2d
351#define DRIVE_CURRENT_18_400_mA_T114 0x2e
352#define DRIVE_CURRENT_18_800_mA_T114 0x2f
353#define DRIVE_CURRENT_19_200_mA_T114 0x30
354#define DRIVE_CURRENT_19_600_mA_T114 0x31
355#define DRIVE_CURRENT_20_000_mA_T114 0x32
356#define DRIVE_CURRENT_20_400_mA_T114 0x33
357#define DRIVE_CURRENT_20_800_mA_T114 0x34
358#define DRIVE_CURRENT_21_200_mA_T114 0x35
359#define DRIVE_CURRENT_21_600_mA_T114 0x36
360#define DRIVE_CURRENT_22_000_mA_T114 0x37
361#define DRIVE_CURRENT_22_400_mA_T114 0x38
362#define DRIVE_CURRENT_22_800_mA_T114 0x39
363#define DRIVE_CURRENT_23_200_mA_T114 0x3a
364#define DRIVE_CURRENT_23_600_mA_T114 0x3b
365#define DRIVE_CURRENT_24_000_mA_T114 0x3c
366#define DRIVE_CURRENT_24_400_mA_T114 0x3d
367#define DRIVE_CURRENT_24_800_mA_T114 0x3e
368#define DRIVE_CURRENT_25_200_mA_T114 0x3f
369#define DRIVE_CURRENT_25_400_mA_T114 0x40
370#define DRIVE_CURRENT_25_800_mA_T114 0x41
371#define DRIVE_CURRENT_26_200_mA_T114 0x42
372#define DRIVE_CURRENT_26_600_mA_T114 0x43
373#define DRIVE_CURRENT_27_000_mA_T114 0x44
374#define DRIVE_CURRENT_27_400_mA_T114 0x45
375#define DRIVE_CURRENT_27_800_mA_T114 0x46
376#define DRIVE_CURRENT_28_200_mA_T114 0x47
377
302#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f 378#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f
303#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80 379#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80
304#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81 380#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81
@@ -358,6 +434,23 @@
358#define PE_CURRENT_7_0_mA 0xe 434#define PE_CURRENT_7_0_mA 0xe
359#define PE_CURRENT_7_5_mA 0xf 435#define PE_CURRENT_7_5_mA 0xf
360 436
437#define PE_CURRENT_0_mA_T114 0x0
438#define PE_CURRENT_1_mA_T114 0x1
439#define PE_CURRENT_2_mA_T114 0x2
440#define PE_CURRENT_3_mA_T114 0x3
441#define PE_CURRENT_4_mA_T114 0x4
442#define PE_CURRENT_5_mA_T114 0x5
443#define PE_CURRENT_6_mA_T114 0x6
444#define PE_CURRENT_7_mA_T114 0x7
445#define PE_CURRENT_8_mA_T114 0x8
446#define PE_CURRENT_9_mA_T114 0x9
447#define PE_CURRENT_10_mA_T114 0xa
448#define PE_CURRENT_11_mA_T114 0xb
449#define PE_CURRENT_12_mA_T114 0xc
450#define PE_CURRENT_13_mA_T114 0xd
451#define PE_CURRENT_14_mA_T114 0xe
452#define PE_CURRENT_15_mA_T114 0xf
453
361#define HDMI_NV_PDISP_KEY_CTRL 0x9a 454#define HDMI_NV_PDISP_KEY_CTRL 0x9a
362#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b 455#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b
363#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c 456#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c
@@ -383,4 +476,61 @@
383#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5 476#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5
384#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5 477#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
385 478
479#define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT 0xd1
480#define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) << 0)
481#define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) << 8)
482#define PEAK_CURRENT_LANE2(x) (((x) & 0x7f) << 16)
483#define PEAK_CURRENT_LANE3(x) (((x) & 0x7f) << 24)
484
485#define PEAK_CURRENT_0_000_mA 0x00
486#define PEAK_CURRENT_0_200_mA 0x01
487#define PEAK_CURRENT_0_400_mA 0x02
488#define PEAK_CURRENT_0_600_mA 0x03
489#define PEAK_CURRENT_0_800_mA 0x04
490#define PEAK_CURRENT_1_000_mA 0x05
491#define PEAK_CURRENT_1_200_mA 0x06
492#define PEAK_CURRENT_1_400_mA 0x07
493#define PEAK_CURRENT_1_600_mA 0x08
494#define PEAK_CURRENT_1_800_mA 0x09
495#define PEAK_CURRENT_2_000_mA 0x0a
496#define PEAK_CURRENT_2_200_mA 0x0b
497#define PEAK_CURRENT_2_400_mA 0x0c
498#define PEAK_CURRENT_2_600_mA 0x0d
499#define PEAK_CURRENT_2_800_mA 0x0e
500#define PEAK_CURRENT_3_000_mA 0x0f
501#define PEAK_CURRENT_3_200_mA 0x10
502#define PEAK_CURRENT_3_400_mA 0x11
503#define PEAK_CURRENT_3_600_mA 0x12
504#define PEAK_CURRENT_3_800_mA 0x13
505#define PEAK_CURRENT_4_000_mA 0x14
506#define PEAK_CURRENT_4_200_mA 0x15
507#define PEAK_CURRENT_4_400_mA 0x16
508#define PEAK_CURRENT_4_600_mA 0x17
509#define PEAK_CURRENT_4_800_mA 0x18
510#define PEAK_CURRENT_5_000_mA 0x19
511#define PEAK_CURRENT_5_200_mA 0x1a
512#define PEAK_CURRENT_5_400_mA 0x1b
513#define PEAK_CURRENT_5_600_mA 0x1c
514#define PEAK_CURRENT_5_800_mA 0x1d
515#define PEAK_CURRENT_6_000_mA 0x1e
516#define PEAK_CURRENT_6_200_mA 0x1f
517#define PEAK_CURRENT_6_400_mA 0x20
518#define PEAK_CURRENT_6_600_mA 0x21
519#define PEAK_CURRENT_6_800_mA 0x22
520#define PEAK_CURRENT_7_000_mA 0x23
521#define PEAK_CURRENT_7_200_mA 0x24
522#define PEAK_CURRENT_7_400_mA 0x25
523#define PEAK_CURRENT_7_600_mA 0x26
524#define PEAK_CURRENT_7_800_mA 0x27
525#define PEAK_CURRENT_8_000_mA 0x28
526#define PEAK_CURRENT_8_200_mA 0x29
527#define PEAK_CURRENT_8_400_mA 0x2a
528#define PEAK_CURRENT_8_600_mA 0x2b
529#define PEAK_CURRENT_8_800_mA 0x2c
530#define PEAK_CURRENT_9_000_mA 0x2d
531#define PEAK_CURRENT_9_200_mA 0x2e
532#define PEAK_CURRENT_9_400_mA 0x2f
533
534#define HDMI_NV_PDISP_SOR_PAD_CTLS0 0xd2
535
386#endif /* TEGRA_HDMI_H */ 536#endif /* TEGRA_HDMI_H */
diff --git a/drivers/gpu/host1x/drm/output.c b/drivers/gpu/drm/tegra/output.c
index 137ae81ab80e..2cb0065e0578 100644
--- a/drivers/gpu/host1x/drm/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -7,9 +7,7 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#include <linux/module.h>
11#include <linux/of_gpio.h> 10#include <linux/of_gpio.h>
12#include <linux/i2c.h>
13 11
14#include "drm.h" 12#include "drm.h"
15 13
@@ -81,10 +79,16 @@ tegra_connector_detect(struct drm_connector *connector, bool force)
81 return status; 79 return status;
82} 80}
83 81
82static void drm_connector_clear(struct drm_connector *connector)
83{
84 memset(connector, 0, sizeof(*connector));
85}
86
84static void tegra_connector_destroy(struct drm_connector *connector) 87static void tegra_connector_destroy(struct drm_connector *connector)
85{ 88{
86 drm_sysfs_connector_remove(connector); 89 drm_sysfs_connector_remove(connector);
87 drm_connector_cleanup(connector); 90 drm_connector_cleanup(connector);
91 drm_connector_clear(connector);
88} 92}
89 93
90static const struct drm_connector_funcs connector_funcs = { 94static const struct drm_connector_funcs connector_funcs = {
@@ -94,9 +98,15 @@ static const struct drm_connector_funcs connector_funcs = {
94 .destroy = tegra_connector_destroy, 98 .destroy = tegra_connector_destroy,
95}; 99};
96 100
101static void drm_encoder_clear(struct drm_encoder *encoder)
102{
103 memset(encoder, 0, sizeof(*encoder));
104}
105
97static void tegra_encoder_destroy(struct drm_encoder *encoder) 106static void tegra_encoder_destroy(struct drm_encoder *encoder)
98{ 107{
99 drm_encoder_cleanup(encoder); 108 drm_encoder_cleanup(encoder);
109 drm_encoder_clear(encoder);
100} 110}
101 111
102static const struct drm_encoder_funcs encoder_funcs = { 112static const struct drm_encoder_funcs encoder_funcs = {
@@ -151,7 +161,7 @@ static irqreturn_t hpd_irq(int irq, void *data)
151 return IRQ_HANDLED; 161 return IRQ_HANDLED;
152} 162}
153 163
154int tegra_output_parse_dt(struct tegra_output *output) 164int tegra_output_probe(struct tegra_output *output)
155{ 165{
156 enum of_gpio_flags flags; 166 enum of_gpio_flags flags;
157 struct device_node *ddc; 167 struct device_node *ddc;
@@ -181,14 +191,6 @@ int tegra_output_parse_dt(struct tegra_output *output)
181 output->hpd_gpio = of_get_named_gpio_flags(output->of_node, 191 output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
182 "nvidia,hpd-gpio", 0, 192 "nvidia,hpd-gpio", 0,
183 &flags); 193 &flags);
184
185 return 0;
186}
187
188int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
189{
190 int connector, encoder, err;
191
192 if (gpio_is_valid(output->hpd_gpio)) { 194 if (gpio_is_valid(output->hpd_gpio)) {
193 unsigned long flags; 195 unsigned long flags;
194 196
@@ -202,7 +204,8 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
202 err = gpio_to_irq(output->hpd_gpio); 204 err = gpio_to_irq(output->hpd_gpio);
203 if (err < 0) { 205 if (err < 0) {
204 dev_err(output->dev, "gpio_to_irq(): %d\n", err); 206 dev_err(output->dev, "gpio_to_irq(): %d\n", err);
205 goto free_hpd; 207 gpio_free(output->hpd_gpio);
208 return err;
206 } 209 }
207 210
208 output->hpd_irq = err; 211 output->hpd_irq = err;
@@ -215,12 +218,33 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
215 if (err < 0) { 218 if (err < 0) {
216 dev_err(output->dev, "failed to request IRQ#%u: %d\n", 219 dev_err(output->dev, "failed to request IRQ#%u: %d\n",
217 output->hpd_irq, err); 220 output->hpd_irq, err);
218 goto free_hpd; 221 gpio_free(output->hpd_gpio);
222 return err;
219 } 223 }
220 224
221 output->connector.polled = DRM_CONNECTOR_POLL_HPD; 225 output->connector.polled = DRM_CONNECTOR_POLL_HPD;
222 } 226 }
223 227
228 return 0;
229}
230
231int tegra_output_remove(struct tegra_output *output)
232{
233 if (gpio_is_valid(output->hpd_gpio)) {
234 free_irq(output->hpd_irq, output);
235 gpio_free(output->hpd_gpio);
236 }
237
238 if (output->ddc)
239 put_device(&output->ddc->dev);
240
241 return 0;
242}
243
244int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
245{
246 int connector, encoder;
247
224 switch (output->type) { 248 switch (output->type) {
225 case TEGRA_OUTPUT_RGB: 249 case TEGRA_OUTPUT_RGB:
226 connector = DRM_MODE_CONNECTOR_LVDS; 250 connector = DRM_MODE_CONNECTOR_LVDS;
@@ -241,6 +265,7 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
241 drm_connector_init(drm, &output->connector, &connector_funcs, 265 drm_connector_init(drm, &output->connector, &connector_funcs,
242 connector); 266 connector);
243 drm_connector_helper_add(&output->connector, &connector_helper_funcs); 267 drm_connector_helper_add(&output->connector, &connector_helper_funcs);
268 output->connector.dpms = DRM_MODE_DPMS_OFF;
244 269
245 drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder); 270 drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
246 drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs); 271 drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
@@ -251,22 +276,9 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
251 output->encoder.possible_crtcs = 0x3; 276 output->encoder.possible_crtcs = 0x3;
252 277
253 return 0; 278 return 0;
254
255free_hpd:
256 gpio_free(output->hpd_gpio);
257
258 return err;
259} 279}
260 280
261int tegra_output_exit(struct tegra_output *output) 281int tegra_output_exit(struct tegra_output *output)
262{ 282{
263 if (gpio_is_valid(output->hpd_gpio)) {
264 free_irq(output->hpd_irq, output);
265 gpio_free(output->hpd_gpio);
266 }
267
268 if (output->ddc)
269 put_device(&output->ddc->dev);
270
271 return 0; 283 return 0;
272} 284}
diff --git a/drivers/gpu/host1x/drm/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 5aa66ef7a946..ba47ca4fb880 100644
--- a/drivers/gpu/host1x/drm/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -8,9 +8,6 @@
8 */ 8 */
9 9
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/module.h>
12#include <linux/of.h>
13#include <linux/platform_device.h>
14 11
15#include "drm.h" 12#include "drm.h"
16#include "dc.h" 13#include "dc.h"
@@ -150,7 +147,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
150 rgb->output.dev = dc->dev; 147 rgb->output.dev = dc->dev;
151 rgb->output.of_node = np; 148 rgb->output.of_node = np;
152 149
153 err = tegra_output_parse_dt(&rgb->output); 150 err = tegra_output_probe(&rgb->output);
154 if (err < 0) 151 if (err < 0)
155 return err; 152 return err;
156 153
@@ -177,6 +174,20 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
177 return 0; 174 return 0;
178} 175}
179 176
177int tegra_dc_rgb_remove(struct tegra_dc *dc)
178{
179 int err;
180
181 if (!dc->rgb)
182 return 0;
183
184 err = tegra_output_remove(dc->rgb);
185 if (err < 0)
186 return err;
187
188 return 0;
189}
190
180int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) 191int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
181{ 192{
182 struct tegra_rgb *rgb = to_rgb(dc->rgb); 193 struct tegra_rgb *rgb = to_rgb(dc->rgb);
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b2b33dde2afb..b433b9f040c9 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -5,10 +5,6 @@ ccflags-y := -Iinclude/drm
5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ 5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ 6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
7 ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \ 7 ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
8 ttm_bo_manager.o 8 ttm_bo_manager.o ttm_page_alloc_dma.o
9
10ifeq ($(CONFIG_SWIOTLB),y)
11ttm-y += ttm_page_alloc_dma.o
12endif
13 9
14obj-$(CONFIG_DRM_TTM) += ttm.o 10obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f1a857ec1021..8d5a646ebe6a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -429,8 +429,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
429 sync_obj = driver->sync_obj_ref(bo->sync_obj); 429 sync_obj = driver->sync_obj_ref(bo->sync_obj);
430 spin_unlock(&bdev->fence_lock); 430 spin_unlock(&bdev->fence_lock);
431 431
432 if (!ret) 432 if (!ret) {
433
434 /*
435 * Make NO_EVICT bos immediately available to
436 * shrinkers, now that they are queued for
437 * destruction.
438 */
439 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
440 bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
441 ttm_bo_add_to_lru(bo);
442 }
443
433 ww_mutex_unlock(&bo->resv->lock); 444 ww_mutex_unlock(&bo->resv->lock);
445 }
434 446
435 kref_get(&bo->list_kref); 447 kref_get(&bo->list_kref);
436 list_add_tail(&bo->ddestroy, &bdev->ddestroy); 448 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -986,24 +998,32 @@ out_unlock:
986 return ret; 998 return ret;
987} 999}
988 1000
989static int ttm_bo_mem_compat(struct ttm_placement *placement, 1001static bool ttm_bo_mem_compat(struct ttm_placement *placement,
990 struct ttm_mem_reg *mem) 1002 struct ttm_mem_reg *mem,
1003 uint32_t *new_flags)
991{ 1004{
992 int i; 1005 int i;
993 1006
994 if (mem->mm_node && placement->lpfn != 0 && 1007 if (mem->mm_node && placement->lpfn != 0 &&
995 (mem->start < placement->fpfn || 1008 (mem->start < placement->fpfn ||
996 mem->start + mem->num_pages > placement->lpfn)) 1009 mem->start + mem->num_pages > placement->lpfn))
997 return -1; 1010 return false;
998 1011
999 for (i = 0; i < placement->num_placement; i++) { 1012 for (i = 0; i < placement->num_placement; i++) {
1000 if ((placement->placement[i] & mem->placement & 1013 *new_flags = placement->placement[i];
1001 TTM_PL_MASK_CACHING) && 1014 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1002 (placement->placement[i] & mem->placement & 1015 (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1003 TTM_PL_MASK_MEM)) 1016 return true;
1004 return i;
1005 } 1017 }
1006 return -1; 1018
1019 for (i = 0; i < placement->num_busy_placement; i++) {
1020 *new_flags = placement->busy_placement[i];
1021 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1022 (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1023 return true;
1024 }
1025
1026 return false;
1007} 1027}
1008 1028
1009int ttm_bo_validate(struct ttm_buffer_object *bo, 1029int ttm_bo_validate(struct ttm_buffer_object *bo,
@@ -1012,6 +1032,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1012 bool no_wait_gpu) 1032 bool no_wait_gpu)
1013{ 1033{
1014 int ret; 1034 int ret;
1035 uint32_t new_flags;
1015 1036
1016 lockdep_assert_held(&bo->resv->lock.base); 1037 lockdep_assert_held(&bo->resv->lock.base);
1017 /* Check that range is valid */ 1038 /* Check that range is valid */
@@ -1022,8 +1043,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1022 /* 1043 /*
1023 * Check whether we need to move buffer. 1044 * Check whether we need to move buffer.
1024 */ 1045 */
1025 ret = ttm_bo_mem_compat(placement, &bo->mem); 1046 if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1026 if (ret < 0) {
1027 ret = ttm_bo_move_buffer(bo, placement, interruptible, 1047 ret = ttm_bo_move_buffer(bo, placement, interruptible,
1028 no_wait_gpu); 1048 no_wait_gpu);
1029 if (ret) 1049 if (ret)
@@ -1033,7 +1053,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1033 * Use the access and other non-mapping-related flag bits from 1053 * Use the access and other non-mapping-related flag bits from
1034 * the compatible memory placement flags to the active flags 1054 * the compatible memory placement flags to the active flags
1035 */ 1055 */
1036 ttm_flag_masked(&bo->mem.placement, placement->placement[ret], 1056 ttm_flag_masked(&bo->mem.placement, new_flags,
1037 ~TTM_PL_MASK_MEMTYPE); 1057 ~TTM_PL_MASK_MEMTYPE);
1038 } 1058 }
1039 /* 1059 /*
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7cc904d3a4d1..4834c463c38b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -343,19 +343,25 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
343 if (ret) 343 if (ret)
344 goto out; 344 goto out;
345 345
346 /*
347 * Single TTM move. NOP.
348 */
346 if (old_iomap == NULL && new_iomap == NULL) 349 if (old_iomap == NULL && new_iomap == NULL)
347 goto out2; 350 goto out2;
351
352 /*
353 * Move nonexistent data. NOP.
354 */
348 if (old_iomap == NULL && ttm == NULL) 355 if (old_iomap == NULL && ttm == NULL)
349 goto out2; 356 goto out2;
350 357
351 if (ttm->state == tt_unpopulated) { 358 /*
359 * TTM might be null for moves within the same region.
360 */
361 if (ttm && ttm->state == tt_unpopulated) {
352 ret = ttm->bdev->driver->ttm_tt_populate(ttm); 362 ret = ttm->bdev->driver->ttm_tt_populate(ttm);
353 if (ret) { 363 if (ret)
354 /* if we fail here don't nuke the mm node
355 * as the bo still owns it */
356 old_copy.mm_node = NULL;
357 goto out1; 364 goto out1;
358 }
359 } 365 }
360 366
361 add = 0; 367 add = 0;
@@ -381,11 +387,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
381 prot); 387 prot);
382 } else 388 } else
383 ret = ttm_copy_io_page(new_iomap, old_iomap, page); 389 ret = ttm_copy_io_page(new_iomap, old_iomap, page);
384 if (ret) { 390 if (ret)
385 /* failing here, means keep old copy as-is */
386 old_copy.mm_node = NULL;
387 goto out1; 391 goto out1;
388 }
389 } 392 }
390 mb(); 393 mb();
391out2: 394out2:
@@ -403,7 +406,12 @@ out1:
403 ttm_mem_reg_iounmap(bdev, old_mem, new_iomap); 406 ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
404out: 407out:
405 ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); 408 ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
406 ttm_bo_mem_put(bo, &old_copy); 409
410 /*
411 * On error, keep the mm node!
412 */
413 if (!ret)
414 ttm_bo_mem_put(bo, &old_copy);
407 return ret; 415 return ret;
408} 416}
409EXPORT_SYMBOL(ttm_bo_move_memcpy); 417EXPORT_SYMBOL(ttm_bo_move_memcpy);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1006c15445e9..ac617f3ecd0c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -41,6 +41,51 @@
41 41
42#define TTM_BO_VM_NUM_PREFAULT 16 42#define TTM_BO_VM_NUM_PREFAULT 16
43 43
44static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
45 struct vm_area_struct *vma,
46 struct vm_fault *vmf)
47{
48 struct ttm_bo_device *bdev = bo->bdev;
49 int ret = 0;
50
51 spin_lock(&bdev->fence_lock);
52 if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
53 goto out_unlock;
54
55 /*
56 * Quick non-stalling check for idle.
57 */
58 ret = ttm_bo_wait(bo, false, false, true);
59 if (likely(ret == 0))
60 goto out_unlock;
61
62 /*
63 * If possible, avoid waiting for GPU with mmap_sem
64 * held.
65 */
66 if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
67 ret = VM_FAULT_RETRY;
68 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
69 goto out_unlock;
70
71 up_read(&vma->vm_mm->mmap_sem);
72 (void) ttm_bo_wait(bo, false, true, false);
73 goto out_unlock;
74 }
75
76 /*
77 * Ordinary wait.
78 */
79 ret = ttm_bo_wait(bo, false, true, false);
80 if (unlikely(ret != 0))
81 ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
82 VM_FAULT_NOPAGE;
83
84out_unlock:
85 spin_unlock(&bdev->fence_lock);
86 return ret;
87}
88
44static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 89static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
45{ 90{
46 struct ttm_buffer_object *bo = (struct ttm_buffer_object *) 91 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -57,6 +102,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
57 int retval = VM_FAULT_NOPAGE; 102 int retval = VM_FAULT_NOPAGE;
58 struct ttm_mem_type_manager *man = 103 struct ttm_mem_type_manager *man =
59 &bdev->man[bo->mem.mem_type]; 104 &bdev->man[bo->mem.mem_type];
105 struct vm_area_struct cvma;
60 106
61 /* 107 /*
62 * Work around locking order reversal in fault / nopfn 108 * Work around locking order reversal in fault / nopfn
@@ -91,18 +137,11 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
91 * Wait for buffer data in transit, due to a pipelined 137 * Wait for buffer data in transit, due to a pipelined
92 * move. 138 * move.
93 */ 139 */
94 140 ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
95 spin_lock(&bdev->fence_lock); 141 if (unlikely(ret != 0)) {
96 if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) { 142 retval = ret;
97 ret = ttm_bo_wait(bo, false, true, false); 143 goto out_unlock;
98 spin_unlock(&bdev->fence_lock); 144 }
99 if (unlikely(ret != 0)) {
100 retval = (ret != -ERESTARTSYS) ?
101 VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
102 goto out_unlock;
103 }
104 } else
105 spin_unlock(&bdev->fence_lock);
106 145
107 ret = ttm_mem_io_lock(man, true); 146 ret = ttm_mem_io_lock(man, true);
108 if (unlikely(ret != 0)) { 147 if (unlikely(ret != 0)) {
@@ -126,26 +165,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
126 } 165 }
127 166
128 /* 167 /*
129 * Strictly, we're not allowed to modify vma->vm_page_prot here, 168 * Make a local vma copy to modify the page_prot member
130 * since the mmap_sem is only held in read mode. However, we 169 * and vm_flags if necessary. The vma parameter is protected
131 * modify only the caching bits of vma->vm_page_prot and 170 * by mmap_sem in write mode.
132 * consider those bits protected by
133 * the bo->mutex, as we should be the only writers.
134 * There shouldn't really be any readers of these bits except
135 * within vm_insert_mixed()? fork?
136 *
137 * TODO: Add a list of vmas to the bo, and change the
138 * vma->vm_page_prot when the object changes caching policy, with
139 * the correct locks held.
140 */ 171 */
172 cvma = *vma;
173 cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
174
141 if (bo->mem.bus.is_iomem) { 175 if (bo->mem.bus.is_iomem) {
142 vma->vm_page_prot = ttm_io_prot(bo->mem.placement, 176 cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
143 vma->vm_page_prot); 177 cvma.vm_page_prot);
144 } else { 178 } else {
145 ttm = bo->ttm; 179 ttm = bo->ttm;
146 vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ? 180 if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
147 vm_get_page_prot(vma->vm_flags) : 181 cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
148 ttm_io_prot(bo->mem.placement, vma->vm_page_prot); 182 cvma.vm_page_prot);
149 183
150 /* Allocate all page at once, most common usage */ 184 /* Allocate all page at once, most common usage */
151 if (ttm->bdev->driver->ttm_tt_populate(ttm)) { 185 if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
@@ -172,7 +206,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
172 pfn = page_to_pfn(page); 206 pfn = page_to_pfn(page);
173 } 207 }
174 208
175 ret = vm_insert_mixed(vma, address, pfn); 209 ret = vm_insert_mixed(&cvma, address, pfn);
176 /* 210 /*
177 * Somebody beat us to this PTE or prefaulting to 211 * Somebody beat us to this PTE or prefaulting to
178 * an already populated PTE, or prefaulting error. 212 * an already populated PTE, or prefaulting error.
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7957beeeaf73..fb8259f69839 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -33,6 +33,7 @@
33 * when freed). 33 * when freed).
34 */ 34 */
35 35
36#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
36#define pr_fmt(fmt) "[TTM] " fmt 37#define pr_fmt(fmt) "[TTM] " fmt
37 38
38#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
@@ -1142,3 +1143,5 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
1142 return 0; 1143 return 0;
1143} 1144}
1144EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs); 1145EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
1146
1147#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 96dc84dc34d0..7776e6f0aef6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -141,37 +141,374 @@ struct ttm_placement vmw_srf_placement = {
141}; 141};
142 142
143struct vmw_ttm_tt { 143struct vmw_ttm_tt {
144 struct ttm_tt ttm; 144 struct ttm_dma_tt dma_ttm;
145 struct vmw_private *dev_priv; 145 struct vmw_private *dev_priv;
146 int gmr_id; 146 int gmr_id;
147 struct sg_table sgt;
148 struct vmw_sg_table vsgt;
149 uint64_t sg_alloc_size;
150 bool mapped;
147}; 151};
148 152
153/**
154 * Helper functions to advance a struct vmw_piter iterator.
155 *
156 * @viter: Pointer to the iterator.
157 *
158 * These functions return false if past the end of the list,
159 * true otherwise. Functions are selected depending on the current
160 * DMA mapping mode.
161 */
162static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
163{
164 return ++(viter->i) < viter->num_pages;
165}
166
167static bool __vmw_piter_sg_next(struct vmw_piter *viter)
168{
169 return __sg_page_iter_next(&viter->iter);
170}
171
172
173/**
174 * Helper functions to return a pointer to the current page.
175 *
176 * @viter: Pointer to the iterator
177 *
178 * These functions return a pointer to the page currently
179 * pointed to by @viter. Functions are selected depending on the
180 * current mapping mode.
181 */
182static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
183{
184 return viter->pages[viter->i];
185}
186
187static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
188{
189 return sg_page_iter_page(&viter->iter);
190}
191
192
193/**
194 * Helper functions to return the DMA address of the current page.
195 *
196 * @viter: Pointer to the iterator
197 *
198 * These functions return the DMA address of the page currently
199 * pointed to by @viter. Functions are selected depending on the
200 * current mapping mode.
201 */
202static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
203{
204 return page_to_phys(viter->pages[viter->i]);
205}
206
207static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
208{
209 return viter->addrs[viter->i];
210}
211
212static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
213{
214 return sg_page_iter_dma_address(&viter->iter);
215}
216
217
218/**
219 * vmw_piter_start - Initialize a struct vmw_piter.
220 *
221 * @viter: Pointer to the iterator to initialize
222 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
223 *
224 * Note that we're following the convention of __sg_page_iter_start, so that
225 * the iterator doesn't point to a valid page after initialization; it has
226 * to be advanced one step first.
227 */
228void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
229 unsigned long p_offset)
230{
231 viter->i = p_offset - 1;
232 viter->num_pages = vsgt->num_pages;
233 switch (vsgt->mode) {
234 case vmw_dma_phys:
235 viter->next = &__vmw_piter_non_sg_next;
236 viter->dma_address = &__vmw_piter_phys_addr;
237 viter->page = &__vmw_piter_non_sg_page;
238 viter->pages = vsgt->pages;
239 break;
240 case vmw_dma_alloc_coherent:
241 viter->next = &__vmw_piter_non_sg_next;
242 viter->dma_address = &__vmw_piter_dma_addr;
243 viter->page = &__vmw_piter_non_sg_page;
244 viter->addrs = vsgt->addrs;
245 break;
246 case vmw_dma_map_populate:
247 case vmw_dma_map_bind:
248 viter->next = &__vmw_piter_sg_next;
249 viter->dma_address = &__vmw_piter_sg_addr;
250 viter->page = &__vmw_piter_sg_page;
251 __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
252 vsgt->sgt->orig_nents, p_offset);
253 break;
254 default:
255 BUG();
256 }
257}
258
259/**
260 * vmw_ttm_unmap_from_dma - unmap device addresses previsouly mapped for
261 * TTM pages
262 *
263 * @vmw_tt: Pointer to a struct vmw_ttm_backend
264 *
265 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
266 */
267static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
268{
269 struct device *dev = vmw_tt->dev_priv->dev->dev;
270
271 dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
272 DMA_BIDIRECTIONAL);
273 vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
274}
275
276/**
277 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
278 *
279 * @vmw_tt: Pointer to a struct vmw_ttm_backend
280 *
281 * This function is used to get device addresses from the kernel DMA layer.
282 * However, it's violating the DMA API in that when this operation has been
283 * performed, it's illegal for the CPU to write to the pages without first
284 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
285 * therefore only legal to call this function if we know that the function
286 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
287 * a CPU write buffer flush.
288 */
289static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
290{
291 struct device *dev = vmw_tt->dev_priv->dev->dev;
292 int ret;
293
294 ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
295 DMA_BIDIRECTIONAL);
296 if (unlikely(ret == 0))
297 return -ENOMEM;
298
299 vmw_tt->sgt.nents = ret;
300
301 return 0;
302}
303
304/**
305 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
306 *
307 * @vmw_tt: Pointer to a struct vmw_ttm_tt
308 *
309 * Select the correct function for and make sure the TTM pages are
310 * visible to the device. Allocate storage for the device mappings.
311 * If a mapping has already been performed, indicated by the storage
312 * pointer being non NULL, the function returns success.
313 */
314static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
315{
316 struct vmw_private *dev_priv = vmw_tt->dev_priv;
317 struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
318 struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
319 struct vmw_piter iter;
320 dma_addr_t old;
321 int ret = 0;
322 static size_t sgl_size;
323 static size_t sgt_size;
324
325 if (vmw_tt->mapped)
326 return 0;
327
328 vsgt->mode = dev_priv->map_mode;
329 vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
330 vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
331 vsgt->addrs = vmw_tt->dma_ttm.dma_address;
332 vsgt->sgt = &vmw_tt->sgt;
333
334 switch (dev_priv->map_mode) {
335 case vmw_dma_map_bind:
336 case vmw_dma_map_populate:
337 if (unlikely(!sgl_size)) {
338 sgl_size = ttm_round_pot(sizeof(struct scatterlist));
339 sgt_size = ttm_round_pot(sizeof(struct sg_table));
340 }
341 vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
342 ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
343 true);
344 if (unlikely(ret != 0))
345 return ret;
346
347 ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
348 vsgt->num_pages, 0,
349 (unsigned long)
350 vsgt->num_pages << PAGE_SHIFT,
351 GFP_KERNEL);
352 if (unlikely(ret != 0))
353 goto out_sg_alloc_fail;
354
355 if (vsgt->num_pages > vmw_tt->sgt.nents) {
356 uint64_t over_alloc =
357 sgl_size * (vsgt->num_pages -
358 vmw_tt->sgt.nents);
359
360 ttm_mem_global_free(glob, over_alloc);
361 vmw_tt->sg_alloc_size -= over_alloc;
362 }
363
364 ret = vmw_ttm_map_for_dma(vmw_tt);
365 if (unlikely(ret != 0))
366 goto out_map_fail;
367
368 break;
369 default:
370 break;
371 }
372
373 old = ~((dma_addr_t) 0);
374 vmw_tt->vsgt.num_regions = 0;
375 for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
376 dma_addr_t cur = vmw_piter_dma_addr(&iter);
377
378 if (cur != old + PAGE_SIZE)
379 vmw_tt->vsgt.num_regions++;
380 old = cur;
381 }
382
383 vmw_tt->mapped = true;
384 return 0;
385
386out_map_fail:
387 sg_free_table(vmw_tt->vsgt.sgt);
388 vmw_tt->vsgt.sgt = NULL;
389out_sg_alloc_fail:
390 ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
391 return ret;
392}
393
394/**
395 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
396 *
397 * @vmw_tt: Pointer to a struct vmw_ttm_tt
398 *
399 * Tear down any previously set up device DMA mappings and free
400 * any storage space allocated for them. If there are no mappings set up,
401 * this function is a NOP.
402 */
403static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
404{
405 struct vmw_private *dev_priv = vmw_tt->dev_priv;
406
407 if (!vmw_tt->vsgt.sgt)
408 return;
409
410 switch (dev_priv->map_mode) {
411 case vmw_dma_map_bind:
412 case vmw_dma_map_populate:
413 vmw_ttm_unmap_from_dma(vmw_tt);
414 sg_free_table(vmw_tt->vsgt.sgt);
415 vmw_tt->vsgt.sgt = NULL;
416 ttm_mem_global_free(vmw_mem_glob(dev_priv),
417 vmw_tt->sg_alloc_size);
418 break;
419 default:
420 break;
421 }
422 vmw_tt->mapped = false;
423}
424
149static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) 425static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
150{ 426{
151 struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm); 427 struct vmw_ttm_tt *vmw_be =
428 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
429 int ret;
430
431 ret = vmw_ttm_map_dma(vmw_be);
432 if (unlikely(ret != 0))
433 return ret;
152 434
153 vmw_be->gmr_id = bo_mem->start; 435 vmw_be->gmr_id = bo_mem->start;
154 436
155 return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages, 437 return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
156 ttm->num_pages, vmw_be->gmr_id); 438 ttm->num_pages, vmw_be->gmr_id);
157} 439}
158 440
159static int vmw_ttm_unbind(struct ttm_tt *ttm) 441static int vmw_ttm_unbind(struct ttm_tt *ttm)
160{ 442{
161 struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm); 443 struct vmw_ttm_tt *vmw_be =
444 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
162 445
163 vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); 446 vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
447
448 if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
449 vmw_ttm_unmap_dma(vmw_be);
450
164 return 0; 451 return 0;
165} 452}
166 453
167static void vmw_ttm_destroy(struct ttm_tt *ttm) 454static void vmw_ttm_destroy(struct ttm_tt *ttm)
168{ 455{
169 struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm); 456 struct vmw_ttm_tt *vmw_be =
170 457 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
171 ttm_tt_fini(ttm); 458
459 vmw_ttm_unmap_dma(vmw_be);
460 if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
461 ttm_dma_tt_fini(&vmw_be->dma_ttm);
462 else
463 ttm_tt_fini(ttm);
172 kfree(vmw_be); 464 kfree(vmw_be);
173} 465}
174 466
467static int vmw_ttm_populate(struct ttm_tt *ttm)
468{
469 struct vmw_ttm_tt *vmw_tt =
470 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
471 struct vmw_private *dev_priv = vmw_tt->dev_priv;
472 struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
473 int ret;
474
475 if (ttm->state != tt_unpopulated)
476 return 0;
477
478 if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
479 size_t size =
480 ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
481 ret = ttm_mem_global_alloc(glob, size, false, true);
482 if (unlikely(ret != 0))
483 return ret;
484
485 ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
486 if (unlikely(ret != 0))
487 ttm_mem_global_free(glob, size);
488 } else
489 ret = ttm_pool_populate(ttm);
490
491 return ret;
492}
493
494static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
495{
496 struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
497 dma_ttm.ttm);
498 struct vmw_private *dev_priv = vmw_tt->dev_priv;
499 struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
500
501 vmw_ttm_unmap_dma(vmw_tt);
502 if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
503 size_t size =
504 ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
505
506 ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
507 ttm_mem_global_free(glob, size);
508 } else
509 ttm_pool_unpopulate(ttm);
510}
511
175static struct ttm_backend_func vmw_ttm_func = { 512static struct ttm_backend_func vmw_ttm_func = {
176 .bind = vmw_ttm_bind, 513 .bind = vmw_ttm_bind,
177 .unbind = vmw_ttm_unbind, 514 .unbind = vmw_ttm_unbind,
@@ -183,20 +520,28 @@ struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
183 struct page *dummy_read_page) 520 struct page *dummy_read_page)
184{ 521{
185 struct vmw_ttm_tt *vmw_be; 522 struct vmw_ttm_tt *vmw_be;
523 int ret;
186 524
187 vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL); 525 vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
188 if (!vmw_be) 526 if (!vmw_be)
189 return NULL; 527 return NULL;
190 528
191 vmw_be->ttm.func = &vmw_ttm_func; 529 vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
192 vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); 530 vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
193 531
194 if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) { 532 if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
195 kfree(vmw_be); 533 ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
196 return NULL; 534 dummy_read_page);
197 } 535 else
198 536 ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
199 return &vmw_be->ttm; 537 dummy_read_page);
538 if (unlikely(ret != 0))
539 goto out_no_init;
540
541 return &vmw_be->dma_ttm.ttm;
542out_no_init:
543 kfree(vmw_be);
544 return NULL;
200} 545}
201 546
202int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) 547int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -332,8 +677,8 @@ static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
332 677
333struct ttm_bo_driver vmw_bo_driver = { 678struct ttm_bo_driver vmw_bo_driver = {
334 .ttm_tt_create = &vmw_ttm_tt_create, 679 .ttm_tt_create = &vmw_ttm_tt_create,
335 .ttm_tt_populate = &ttm_pool_populate, 680 .ttm_tt_populate = &vmw_ttm_populate,
336 .ttm_tt_unpopulate = &ttm_pool_unpopulate, 681 .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
337 .invalidate_caches = vmw_invalidate_caches, 682 .invalidate_caches = vmw_invalidate_caches,
338 .init_mem_type = vmw_init_mem_type, 683 .init_mem_type = vmw_init_mem_type,
339 .evict_flags = vmw_evict_flags, 684 .evict_flags = vmw_evict_flags,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0508f93b9795..20d5485eaf98 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -32,6 +32,7 @@
32#include <drm/ttm/ttm_bo_driver.h> 32#include <drm/ttm/ttm_bo_driver.h>
33#include <drm/ttm/ttm_object.h> 33#include <drm/ttm/ttm_object.h>
34#include <drm/ttm/ttm_module.h> 34#include <drm/ttm/ttm_module.h>
35#include <linux/dma_remapping.h>
35 36
36#define VMWGFX_DRIVER_NAME "vmwgfx" 37#define VMWGFX_DRIVER_NAME "vmwgfx"
37#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" 38#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
@@ -185,6 +186,9 @@ static struct pci_device_id vmw_pci_id_list[] = {
185MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); 186MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
186 187
187static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); 188static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
189static int vmw_force_iommu;
190static int vmw_restrict_iommu;
191static int vmw_force_coherent;
188 192
189static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 193static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
190static void vmw_master_init(struct vmw_master *); 194static void vmw_master_init(struct vmw_master *);
@@ -193,6 +197,13 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
193 197
194MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); 198MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
195module_param_named(enable_fbdev, enable_fbdev, int, 0600); 199module_param_named(enable_fbdev, enable_fbdev, int, 0600);
200MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
201module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
202MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
203module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
204MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
205module_param_named(force_coherent, vmw_force_coherent, int, 0600);
206
196 207
197static void vmw_print_capabilities(uint32_t capabilities) 208static void vmw_print_capabilities(uint32_t capabilities)
198{ 209{
@@ -427,12 +438,85 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
427 dev_priv->initial_height = height; 438 dev_priv->initial_height = height;
428} 439}
429 440
441/**
442 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
443 * system.
444 *
445 * @dev_priv: Pointer to a struct vmw_private
446 *
447 * This functions tries to determine the IOMMU setup and what actions
448 * need to be taken by the driver to make system pages visible to the
449 * device.
450 * If this function decides that DMA is not possible, it returns -EINVAL.
451 * The driver may then try to disable features of the device that require
452 * DMA.
453 */
454static int vmw_dma_select_mode(struct vmw_private *dev_priv)
455{
456 static const char *names[vmw_dma_map_max] = {
457 [vmw_dma_phys] = "Using physical TTM page addresses.",
458 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
459 [vmw_dma_map_populate] = "Keeping DMA mappings.",
460 [vmw_dma_map_bind] = "Giving up DMA mappings early."};
461#ifdef CONFIG_X86
462 const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
463
464#ifdef CONFIG_INTEL_IOMMU
465 if (intel_iommu_enabled) {
466 dev_priv->map_mode = vmw_dma_map_populate;
467 goto out_fixup;
468 }
469#endif
470
471 if (!(vmw_force_iommu || vmw_force_coherent)) {
472 dev_priv->map_mode = vmw_dma_phys;
473 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
474 return 0;
475 }
476
477 dev_priv->map_mode = vmw_dma_map_populate;
478
479 if (dma_ops->sync_single_for_cpu)
480 dev_priv->map_mode = vmw_dma_alloc_coherent;
481#ifdef CONFIG_SWIOTLB
482 if (swiotlb_nr_tbl() == 0)
483 dev_priv->map_mode = vmw_dma_map_populate;
484#endif
485
486#ifdef CONFIG_INTEL_IOMMU
487out_fixup:
488#endif
489 if (dev_priv->map_mode == vmw_dma_map_populate &&
490 vmw_restrict_iommu)
491 dev_priv->map_mode = vmw_dma_map_bind;
492
493 if (vmw_force_coherent)
494 dev_priv->map_mode = vmw_dma_alloc_coherent;
495
496#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
497 /*
498 * No coherent page pool
499 */
500 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
501 return -EINVAL;
502#endif
503
504#else /* CONFIG_X86 */
505 dev_priv->map_mode = vmw_dma_map_populate;
506#endif /* CONFIG_X86 */
507
508 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
509
510 return 0;
511}
512
430static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 513static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
431{ 514{
432 struct vmw_private *dev_priv; 515 struct vmw_private *dev_priv;
433 int ret; 516 int ret;
434 uint32_t svga_id; 517 uint32_t svga_id;
435 enum vmw_res_type i; 518 enum vmw_res_type i;
519 bool refuse_dma = false;
436 520
437 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 521 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
438 if (unlikely(dev_priv == NULL)) { 522 if (unlikely(dev_priv == NULL)) {
@@ -481,6 +565,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
481 } 565 }
482 566
483 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); 567 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
568 ret = vmw_dma_select_mode(dev_priv);
569 if (unlikely(ret != 0)) {
570 DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
571 refuse_dma = true;
572 }
484 573
485 dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); 574 dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
486 dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); 575 dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
@@ -558,8 +647,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
558 } 647 }
559 648
560 dev_priv->has_gmr = true; 649 dev_priv->has_gmr = true;
561 if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, 650 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
562 dev_priv->max_gmr_ids) != 0) { 651 refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
652 dev_priv->max_gmr_ids) != 0) {
563 DRM_INFO("No GMR memory available. " 653 DRM_INFO("No GMR memory available. "
564 "Graphics memory resources are very limited.\n"); 654 "Graphics memory resources are very limited.\n");
565 dev_priv->has_gmr = false; 655 dev_priv->has_gmr = false;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 150ec64af617..e401d5dbcb96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -177,6 +177,58 @@ struct vmw_res_cache_entry {
177 struct vmw_resource_val_node *node; 177 struct vmw_resource_val_node *node;
178}; 178};
179 179
180/**
181 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
182 */
183enum vmw_dma_map_mode {
184 vmw_dma_phys, /* Use physical page addresses */
185 vmw_dma_alloc_coherent, /* Use TTM coherent pages */
186 vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */
187 vmw_dma_map_bind, /* Unmap from DMA just before unbind */
188 vmw_dma_map_max
189};
190
191/**
192 * struct vmw_sg_table - Scatter/gather table for binding, with additional
193 * device-specific information.
194 *
195 * @sgt: Pointer to a struct sg_table with binding information
196 * @num_regions: Number of regions with device-address contigous pages
197 */
198struct vmw_sg_table {
199 enum vmw_dma_map_mode mode;
200 struct page **pages;
201 const dma_addr_t *addrs;
202 struct sg_table *sgt;
203 unsigned long num_regions;
204 unsigned long num_pages;
205};
206
207/**
208 * struct vmw_piter - Page iterator that iterates over a list of pages
209 * and DMA addresses that could be either a scatter-gather list or
210 * arrays
211 *
212 * @pages: Array of page pointers to the pages.
213 * @addrs: DMA addresses to the pages if coherent pages are used.
214 * @iter: Scatter-gather page iterator. Current position in SG list.
215 * @i: Current position in arrays.
216 * @num_pages: Number of pages total.
217 * @next: Function to advance the iterator. Returns false if past the list
218 * of pages, true otherwise.
219 * @dma_address: Function to return the DMA address of the current page.
220 */
221struct vmw_piter {
222 struct page **pages;
223 const dma_addr_t *addrs;
224 struct sg_page_iter iter;
225 unsigned long i;
226 unsigned long num_pages;
227 bool (*next)(struct vmw_piter *);
228 dma_addr_t (*dma_address)(struct vmw_piter *);
229 struct page *(*page)(struct vmw_piter *);
230};
231
180struct vmw_sw_context{ 232struct vmw_sw_context{
181 struct drm_open_hash res_ht; 233 struct drm_open_hash res_ht;
182 bool res_ht_initialized; 234 bool res_ht_initialized;
@@ -358,6 +410,11 @@ struct vmw_private {
358 410
359 struct list_head res_lru[vmw_res_max]; 411 struct list_head res_lru[vmw_res_max];
360 uint32_t used_memory_size; 412 uint32_t used_memory_size;
413
414 /*
415 * DMA mapping stuff.
416 */
417 enum vmw_dma_map_mode map_mode;
361}; 418};
362 419
363static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) 420static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -405,7 +462,7 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
405 */ 462 */
406 463
407extern int vmw_gmr_bind(struct vmw_private *dev_priv, 464extern int vmw_gmr_bind(struct vmw_private *dev_priv,
408 struct page *pages[], 465 const struct vmw_sg_table *vsgt,
409 unsigned long num_pages, 466 unsigned long num_pages,
410 int gmr_id); 467 int gmr_id);
411extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); 468extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
@@ -568,6 +625,45 @@ extern struct ttm_placement vmw_evictable_placement;
568extern struct ttm_placement vmw_srf_placement; 625extern struct ttm_placement vmw_srf_placement;
569extern struct ttm_bo_driver vmw_bo_driver; 626extern struct ttm_bo_driver vmw_bo_driver;
570extern int vmw_dma_quiescent(struct drm_device *dev); 627extern int vmw_dma_quiescent(struct drm_device *dev);
628extern void vmw_piter_start(struct vmw_piter *viter,
629 const struct vmw_sg_table *vsgt,
630 unsigned long p_offs);
631
632/**
633 * vmw_piter_next - Advance the iterator one page.
634 *
635 * @viter: Pointer to the iterator to advance.
636 *
637 * Returns false if past the list of pages, true otherwise.
638 */
639static inline bool vmw_piter_next(struct vmw_piter *viter)
640{
641 return viter->next(viter);
642}
643
644/**
645 * vmw_piter_dma_addr - Return the DMA address of the current page.
646 *
647 * @viter: Pointer to the iterator
648 *
649 * Returns the DMA address of the page pointed to by @viter.
650 */
651static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
652{
653 return viter->dma_address(viter);
654}
655
656/**
657 * vmw_piter_page - Return a pointer to the current page.
658 *
659 * @viter: Pointer to the iterator
660 *
661 * Returns the DMA address of the page pointed to by @viter.
662 */
663static inline struct page *vmw_piter_page(struct vmw_piter *viter)
664{
665 return viter->page(viter);
666}
571 667
572/** 668/**
573 * Command submission - vmwgfx_execbuf.c 669 * Command submission - vmwgfx_execbuf.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 1a0bf07fe54b..6ef0b035becb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -32,9 +32,11 @@
32#define VMW_PPN_SIZE (sizeof(unsigned long)) 32#define VMW_PPN_SIZE (sizeof(unsigned long))
33/* A future safe maximum remap size. */ 33/* A future safe maximum remap size. */
34#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE) 34#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
35#define DMA_ADDR_INVALID ((dma_addr_t) 0)
36#define DMA_PAGE_INVALID 0UL
35 37
36static int vmw_gmr2_bind(struct vmw_private *dev_priv, 38static int vmw_gmr2_bind(struct vmw_private *dev_priv,
37 struct page *pages[], 39 struct vmw_piter *iter,
38 unsigned long num_pages, 40 unsigned long num_pages,
39 int gmr_id) 41 int gmr_id)
40{ 42{
@@ -81,11 +83,13 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
81 83
82 for (i = 0; i < nr; ++i) { 84 for (i = 0; i < nr; ++i) {
83 if (VMW_PPN_SIZE <= 4) 85 if (VMW_PPN_SIZE <= 4)
84 *cmd = page_to_pfn(*pages++); 86 *cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
85 else 87 else
86 *((uint64_t *)cmd) = page_to_pfn(*pages++); 88 *((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
89 PAGE_SHIFT;
87 90
88 cmd += VMW_PPN_SIZE / sizeof(*cmd); 91 cmd += VMW_PPN_SIZE / sizeof(*cmd);
92 vmw_piter_next(iter);
89 } 93 }
90 94
91 num_pages -= nr; 95 num_pages -= nr;
@@ -120,22 +124,56 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
120 vmw_fifo_commit(dev_priv, define_size); 124 vmw_fifo_commit(dev_priv, define_size);
121} 125}
122 126
127
128static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
129 struct list_head *desc_pages)
130{
131 struct page *page, *next;
132 struct svga_guest_mem_descriptor *page_virtual;
133 unsigned int desc_per_page = PAGE_SIZE /
134 sizeof(struct svga_guest_mem_descriptor) - 1;
135
136 if (list_empty(desc_pages))
137 return;
138
139 list_for_each_entry_safe(page, next, desc_pages, lru) {
140 list_del_init(&page->lru);
141
142 if (likely(desc_dma != DMA_ADDR_INVALID)) {
143 dma_unmap_page(dev, desc_dma, PAGE_SIZE,
144 DMA_TO_DEVICE);
145 }
146
147 page_virtual = kmap_atomic(page);
148 desc_dma = (dma_addr_t)
149 le32_to_cpu(page_virtual[desc_per_page].ppn) <<
150 PAGE_SHIFT;
151 kunmap_atomic(page_virtual);
152
153 __free_page(page);
154 }
155}
156
123/** 157/**
124 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize 158 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
125 * the number of used descriptors. 159 * the number of used descriptors.
160 *
126 */ 161 */
127 162
128static int vmw_gmr_build_descriptors(struct list_head *desc_pages, 163static int vmw_gmr_build_descriptors(struct device *dev,
129 struct page *pages[], 164 struct list_head *desc_pages,
130 unsigned long num_pages) 165 struct vmw_piter *iter,
166 unsigned long num_pages,
167 dma_addr_t *first_dma)
131{ 168{
132 struct page *page, *next; 169 struct page *page;
133 struct svga_guest_mem_descriptor *page_virtual = NULL; 170 struct svga_guest_mem_descriptor *page_virtual = NULL;
134 struct svga_guest_mem_descriptor *desc_virtual = NULL; 171 struct svga_guest_mem_descriptor *desc_virtual = NULL;
135 unsigned int desc_per_page; 172 unsigned int desc_per_page;
136 unsigned long prev_pfn; 173 unsigned long prev_pfn;
137 unsigned long pfn; 174 unsigned long pfn;
138 int ret; 175 int ret;
176 dma_addr_t desc_dma;
139 177
140 desc_per_page = PAGE_SIZE / 178 desc_per_page = PAGE_SIZE /
141 sizeof(struct svga_guest_mem_descriptor) - 1; 179 sizeof(struct svga_guest_mem_descriptor) - 1;
@@ -148,23 +186,12 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
148 } 186 }
149 187
150 list_add_tail(&page->lru, desc_pages); 188 list_add_tail(&page->lru, desc_pages);
151
152 /*
153 * Point previous page terminating descriptor to this
154 * page before unmapping it.
155 */
156
157 if (likely(page_virtual != NULL)) {
158 desc_virtual->ppn = page_to_pfn(page);
159 kunmap_atomic(page_virtual);
160 }
161
162 page_virtual = kmap_atomic(page); 189 page_virtual = kmap_atomic(page);
163 desc_virtual = page_virtual - 1; 190 desc_virtual = page_virtual - 1;
164 prev_pfn = ~(0UL); 191 prev_pfn = ~(0UL);
165 192
166 while (likely(num_pages != 0)) { 193 while (likely(num_pages != 0)) {
167 pfn = page_to_pfn(*pages); 194 pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
168 195
169 if (pfn != prev_pfn + 1) { 196 if (pfn != prev_pfn + 1) {
170 197
@@ -181,104 +208,82 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
181 } 208 }
182 prev_pfn = pfn; 209 prev_pfn = pfn;
183 --num_pages; 210 --num_pages;
184 ++pages; 211 vmw_piter_next(iter);
185 } 212 }
186 213
187 (++desc_virtual)->ppn = cpu_to_le32(0); 214 (++desc_virtual)->ppn = DMA_PAGE_INVALID;
188 desc_virtual->num_pages = cpu_to_le32(0); 215 desc_virtual->num_pages = cpu_to_le32(0);
216 kunmap_atomic(page_virtual);
189 } 217 }
190 218
191 if (likely(page_virtual != NULL)) 219 desc_dma = 0;
220 list_for_each_entry_reverse(page, desc_pages, lru) {
221 page_virtual = kmap_atomic(page);
222 page_virtual[desc_per_page].ppn = cpu_to_le32
223 (desc_dma >> PAGE_SHIFT);
192 kunmap_atomic(page_virtual); 224 kunmap_atomic(page_virtual);
225 desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
226 DMA_TO_DEVICE);
227
228 if (unlikely(dma_mapping_error(dev, desc_dma)))
229 goto out_err;
230 }
231 *first_dma = desc_dma;
193 232
194 return 0; 233 return 0;
195out_err: 234out_err:
196 list_for_each_entry_safe(page, next, desc_pages, lru) { 235 vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
197 list_del_init(&page->lru);
198 __free_page(page);
199 }
200 return ret; 236 return ret;
201} 237}
202 238
203static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
204{
205 struct page *page, *next;
206
207 list_for_each_entry_safe(page, next, desc_pages, lru) {
208 list_del_init(&page->lru);
209 __free_page(page);
210 }
211}
212
213static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv, 239static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
214 int gmr_id, struct list_head *desc_pages) 240 int gmr_id, dma_addr_t desc_dma)
215{ 241{
216 struct page *page;
217
218 if (unlikely(list_empty(desc_pages)))
219 return;
220
221 page = list_entry(desc_pages->next, struct page, lru);
222
223 mutex_lock(&dev_priv->hw_mutex); 242 mutex_lock(&dev_priv->hw_mutex);
224 243
225 vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); 244 vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
226 wmb(); 245 wmb();
227 vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page)); 246 vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
228 mb(); 247 mb();
229 248
230 mutex_unlock(&dev_priv->hw_mutex); 249 mutex_unlock(&dev_priv->hw_mutex);
231 250
232} 251}
233 252
234/**
235 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
236 * the number of used descriptors.
237 */
238
239static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
240 unsigned long num_pages)
241{
242 unsigned long prev_pfn = ~(0UL);
243 unsigned long pfn;
244 unsigned long descriptors = 0;
245
246 while (num_pages--) {
247 pfn = page_to_pfn(*pages++);
248 if (prev_pfn + 1 != pfn)
249 ++descriptors;
250 prev_pfn = pfn;
251 }
252
253 return descriptors;
254}
255
256int vmw_gmr_bind(struct vmw_private *dev_priv, 253int vmw_gmr_bind(struct vmw_private *dev_priv,
257 struct page *pages[], 254 const struct vmw_sg_table *vsgt,
258 unsigned long num_pages, 255 unsigned long num_pages,
259 int gmr_id) 256 int gmr_id)
260{ 257{
261 struct list_head desc_pages; 258 struct list_head desc_pages;
259 dma_addr_t desc_dma = 0;
260 struct device *dev = dev_priv->dev->dev;
261 struct vmw_piter data_iter;
262 int ret; 262 int ret;
263 263
264 vmw_piter_start(&data_iter, vsgt, 0);
265
266 if (unlikely(!vmw_piter_next(&data_iter)))
267 return 0;
268
264 if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) 269 if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
265 return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id); 270 return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
266 271
267 if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR))) 272 if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
268 return -EINVAL; 273 return -EINVAL;
269 274
270 if (vmw_gmr_count_descriptors(pages, num_pages) > 275 if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
271 dev_priv->max_gmr_descriptors)
272 return -EINVAL; 276 return -EINVAL;
273 277
274 INIT_LIST_HEAD(&desc_pages); 278 INIT_LIST_HEAD(&desc_pages);
275 279
276 ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages); 280 ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
281 num_pages, &desc_dma);
277 if (unlikely(ret != 0)) 282 if (unlikely(ret != 0))
278 return ret; 283 return ret;
279 284
280 vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages); 285 vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
281 vmw_gmr_free_descriptors(&desc_pages); 286 vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
282 287
283 return 0; 288 return 0;
284} 289}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index c509d40c4897..a51f48e3e917 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -168,7 +168,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
168 fb = drm_framebuffer_lookup(dev, arg->fb_id); 168 fb = drm_framebuffer_lookup(dev, arg->fb_id);
169 if (!fb) { 169 if (!fb) {
170 DRM_ERROR("Invalid framebuffer id.\n"); 170 DRM_ERROR("Invalid framebuffer id.\n");
171 ret = -EINVAL; 171 ret = -ENOENT;
172 goto out_no_fb; 172 goto out_no_fb;
173 } 173 }
174 vfb = vmw_framebuffer_to_vfb(fb); 174 vfb = vmw_framebuffer_to_vfb(fb);
@@ -252,7 +252,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
252 fb = drm_framebuffer_lookup(dev, arg->fb_id); 252 fb = drm_framebuffer_lookup(dev, arg->fb_id);
253 if (!fb) { 253 if (!fb) {
254 DRM_ERROR("Invalid framebuffer id.\n"); 254 DRM_ERROR("Invalid framebuffer id.\n");
255 ret = -EINVAL; 255 ret = -ENOENT;
256 goto out_no_fb; 256 goto out_no_fb;
257 } 257 }
258 258
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index fc43c0601236..ecb3d867b426 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1508,7 +1508,7 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1508 1508
1509 obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC); 1509 obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
1510 if (!obj) { 1510 if (!obj) {
1511 ret = -EINVAL; 1511 ret = -ENOENT;
1512 goto out; 1512 goto out;
1513 } 1513 }
1514 1514
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 37fb4befec82..252501a54def 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -32,6 +32,8 @@
32#include <drm/drmP.h> 32#include <drm/drmP.h>
33#include "vmwgfx_resource_priv.h" 33#include "vmwgfx_resource_priv.h"
34 34
35#define VMW_RES_EVICT_ERR_COUNT 10
36
35struct vmw_user_dma_buffer { 37struct vmw_user_dma_buffer {
36 struct ttm_base_object base; 38 struct ttm_base_object base;
37 struct vmw_dma_buffer dma; 39 struct vmw_dma_buffer dma;
@@ -1091,8 +1093,9 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
1091 * to a backup buffer. 1093 * to a backup buffer.
1092 * 1094 *
1093 * @res: The resource to evict. 1095 * @res: The resource to evict.
1096 * @interruptible: Whether to wait interruptible.
1094 */ 1097 */
1095int vmw_resource_do_evict(struct vmw_resource *res) 1098int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1096{ 1099{
1097 struct ttm_validate_buffer val_buf; 1100 struct ttm_validate_buffer val_buf;
1098 const struct vmw_res_func *func = res->func; 1101 const struct vmw_res_func *func = res->func;
@@ -1102,7 +1105,8 @@ int vmw_resource_do_evict(struct vmw_resource *res)
1102 BUG_ON(!func->may_evict); 1105 BUG_ON(!func->may_evict);
1103 1106
1104 val_buf.bo = NULL; 1107 val_buf.bo = NULL;
1105 ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf); 1108 ret = vmw_resource_check_buffer(res, &ticket, interruptible,
1109 &val_buf);
1106 if (unlikely(ret != 0)) 1110 if (unlikely(ret != 0))
1107 return ret; 1111 return ret;
1108 1112
@@ -1141,6 +1145,7 @@ int vmw_resource_validate(struct vmw_resource *res)
1141 struct vmw_private *dev_priv = res->dev_priv; 1145 struct vmw_private *dev_priv = res->dev_priv;
1142 struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type]; 1146 struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
1143 struct ttm_validate_buffer val_buf; 1147 struct ttm_validate_buffer val_buf;
1148 unsigned err_count = 0;
1144 1149
1145 if (likely(!res->func->may_evict)) 1150 if (likely(!res->func->may_evict))
1146 return 0; 1151 return 0;
@@ -1155,7 +1160,7 @@ int vmw_resource_validate(struct vmw_resource *res)
1155 1160
1156 write_lock(&dev_priv->resource_lock); 1161 write_lock(&dev_priv->resource_lock);
1157 if (list_empty(lru_list) || !res->func->may_evict) { 1162 if (list_empty(lru_list) || !res->func->may_evict) {
1158 DRM_ERROR("Out of device device id entries " 1163 DRM_ERROR("Out of device device resources "
1159 "for %s.\n", res->func->type_name); 1164 "for %s.\n", res->func->type_name);
1160 ret = -EBUSY; 1165 ret = -EBUSY;
1161 write_unlock(&dev_priv->resource_lock); 1166 write_unlock(&dev_priv->resource_lock);
@@ -1168,7 +1173,19 @@ int vmw_resource_validate(struct vmw_resource *res)
1168 list_del_init(&evict_res->lru_head); 1173 list_del_init(&evict_res->lru_head);
1169 1174
1170 write_unlock(&dev_priv->resource_lock); 1175 write_unlock(&dev_priv->resource_lock);
1171 vmw_resource_do_evict(evict_res); 1176
1177 ret = vmw_resource_do_evict(evict_res, true);
1178 if (unlikely(ret != 0)) {
1179 write_lock(&dev_priv->resource_lock);
1180 list_add_tail(&evict_res->lru_head, lru_list);
1181 write_unlock(&dev_priv->resource_lock);
1182 if (ret == -ERESTARTSYS ||
1183 ++err_count > VMW_RES_EVICT_ERR_COUNT) {
1184 vmw_resource_unreference(&evict_res);
1185 goto out_no_validate;
1186 }
1187 }
1188
1172 vmw_resource_unreference(&evict_res); 1189 vmw_resource_unreference(&evict_res);
1173 } while (1); 1190 } while (1);
1174 1191
@@ -1253,13 +1270,15 @@ bool vmw_resource_needs_backup(const struct vmw_resource *res)
1253 * @type: The resource type to evict 1270 * @type: The resource type to evict
1254 * 1271 *
1255 * To avoid thrashing starvation or as part of the hibernation sequence, 1272 * To avoid thrashing starvation or as part of the hibernation sequence,
1256 * evict all evictable resources of a specific type. 1273 * try to evict all evictable resources of a specific type.
1257 */ 1274 */
1258static void vmw_resource_evict_type(struct vmw_private *dev_priv, 1275static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1259 enum vmw_res_type type) 1276 enum vmw_res_type type)
1260{ 1277{
1261 struct list_head *lru_list = &dev_priv->res_lru[type]; 1278 struct list_head *lru_list = &dev_priv->res_lru[type];
1262 struct vmw_resource *evict_res; 1279 struct vmw_resource *evict_res;
1280 unsigned err_count = 0;
1281 int ret;
1263 1282
1264 do { 1283 do {
1265 write_lock(&dev_priv->resource_lock); 1284 write_lock(&dev_priv->resource_lock);
@@ -1272,7 +1291,18 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1272 lru_head)); 1291 lru_head));
1273 list_del_init(&evict_res->lru_head); 1292 list_del_init(&evict_res->lru_head);
1274 write_unlock(&dev_priv->resource_lock); 1293 write_unlock(&dev_priv->resource_lock);
1275 vmw_resource_do_evict(evict_res); 1294
1295 ret = vmw_resource_do_evict(evict_res, false);
1296 if (unlikely(ret != 0)) {
1297 write_lock(&dev_priv->resource_lock);
1298 list_add_tail(&evict_res->lru_head, lru_list);
1299 write_unlock(&dev_priv->resource_lock);
1300 if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
1301 vmw_resource_unreference(&evict_res);
1302 return;
1303 }
1304 }
1305
1276 vmw_resource_unreference(&evict_res); 1306 vmw_resource_unreference(&evict_res);
1277 } while (1); 1307 } while (1);
1278 1308
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index ccfd42b23606..7d6bed222542 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -19,6 +19,4 @@ config TEGRA_HOST1X_FIREWALL
19 19
20 If unsure, choose Y. 20 If unsure, choose Y.
21 21
22source "drivers/gpu/host1x/drm/Kconfig"
23
24endif 22endif
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index 3b037b6e0298..afa1e9e4e512 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -1,6 +1,5 @@
1ccflags-y = -Idrivers/gpu/host1x
2
3host1x-y = \ 1host1x-y = \
2 bus.o \
4 syncpt.o \ 3 syncpt.o \
5 dev.o \ 4 dev.o \
6 intr.o \ 5 intr.o \
@@ -8,13 +7,7 @@ host1x-y = \
8 channel.o \ 7 channel.o \
9 job.o \ 8 job.o \
10 debug.o \ 9 debug.o \
11 hw/host1x01.o 10 hw/host1x01.o \
12 11 hw/host1x02.o
13ccflags-y += -Iinclude/drm
14ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
15 12
16host1x-$(CONFIG_DRM_TEGRA) += drm/drm.o drm/fb.o drm/dc.o
17host1x-$(CONFIG_DRM_TEGRA) += drm/output.o drm/rgb.o drm/hdmi.o
18host1x-$(CONFIG_DRM_TEGRA) += drm/gem.o
19host1x-$(CONFIG_DRM_TEGRA) += drm/gr2d.o
20obj-$(CONFIG_TEGRA_HOST1X) += host1x.o 13obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
new file mode 100644
index 000000000000..509383f8be03
--- /dev/null
+++ b/drivers/gpu/host1x/bus.c
@@ -0,0 +1,550 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012-2013, NVIDIA Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/host1x.h>
19#include <linux/of.h>
20#include <linux/slab.h>
21
22#include "dev.h"
23
24static DEFINE_MUTEX(clients_lock);
25static LIST_HEAD(clients);
26
27static DEFINE_MUTEX(drivers_lock);
28static LIST_HEAD(drivers);
29
30static DEFINE_MUTEX(devices_lock);
31static LIST_HEAD(devices);
32
33struct host1x_subdev {
34 struct host1x_client *client;
35 struct device_node *np;
36 struct list_head list;
37};
38
39/**
40 * host1x_subdev_add() - add a new subdevice with an associated device node
41 */
42static int host1x_subdev_add(struct host1x_device *device,
43 struct device_node *np)
44{
45 struct host1x_subdev *subdev;
46
47 subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
48 if (!subdev)
49 return -ENOMEM;
50
51 INIT_LIST_HEAD(&subdev->list);
52 subdev->np = of_node_get(np);
53
54 mutex_lock(&device->subdevs_lock);
55 list_add_tail(&subdev->list, &device->subdevs);
56 mutex_unlock(&device->subdevs_lock);
57
58 return 0;
59}
60
61/**
62 * host1x_subdev_del() - remove subdevice
63 */
64static void host1x_subdev_del(struct host1x_subdev *subdev)
65{
66 list_del(&subdev->list);
67 of_node_put(subdev->np);
68 kfree(subdev);
69}
70
71/**
72 * host1x_device_parse_dt() - scan device tree and add matching subdevices
73 */
74static int host1x_device_parse_dt(struct host1x_device *device)
75{
76 struct device_node *np;
77 int err;
78
79 for_each_child_of_node(device->dev.parent->of_node, np) {
80 if (of_match_node(device->driver->subdevs, np) &&
81 of_device_is_available(np)) {
82 err = host1x_subdev_add(device, np);
83 if (err < 0)
84 return err;
85 }
86 }
87
88 return 0;
89}
90
91static void host1x_subdev_register(struct host1x_device *device,
92 struct host1x_subdev *subdev,
93 struct host1x_client *client)
94{
95 int err;
96
97 /*
98 * Move the subdevice to the list of active (registered) subdevices
99 * and associate it with a client. At the same time, associate the
100 * client with its parent device.
101 */
102 mutex_lock(&device->subdevs_lock);
103 mutex_lock(&device->clients_lock);
104 list_move_tail(&client->list, &device->clients);
105 list_move_tail(&subdev->list, &device->active);
106 client->parent = &device->dev;
107 subdev->client = client;
108 mutex_unlock(&device->clients_lock);
109 mutex_unlock(&device->subdevs_lock);
110
111 /*
112 * When all subdevices have been registered, the composite device is
113 * ready to be probed.
114 */
115 if (list_empty(&device->subdevs)) {
116 err = device->driver->probe(device);
117 if (err < 0)
118 dev_err(&device->dev, "probe failed: %d\n", err);
119 }
120}
121
122static void __host1x_subdev_unregister(struct host1x_device *device,
123 struct host1x_subdev *subdev)
124{
125 struct host1x_client *client = subdev->client;
126 int err;
127
128 /*
129 * If all subdevices have been activated, we're about to remove the
130 * first active subdevice, so unload the driver first.
131 */
132 if (list_empty(&device->subdevs)) {
133 err = device->driver->remove(device);
134 if (err < 0)
135 dev_err(&device->dev, "remove failed: %d\n", err);
136 }
137
138 /*
139 * Move the subdevice back to the list of idle subdevices and remove
140 * it from list of clients.
141 */
142 mutex_lock(&device->clients_lock);
143 subdev->client = NULL;
144 client->parent = NULL;
145 list_move_tail(&subdev->list, &device->subdevs);
146 /*
147 * XXX: Perhaps don't do this here, but rather explicitly remove it
148 * when the device is about to be deleted.
149 *
150 * This is somewhat complicated by the fact that this function is
151 * used to remove the subdevice when a client is unregistered but
152 * also when the composite device is about to be removed.
153 */
154 list_del_init(&client->list);
155 mutex_unlock(&device->clients_lock);
156}
157
158static void host1x_subdev_unregister(struct host1x_device *device,
159 struct host1x_subdev *subdev)
160{
161 mutex_lock(&device->subdevs_lock);
162 __host1x_subdev_unregister(device, subdev);
163 mutex_unlock(&device->subdevs_lock);
164}
165
166int host1x_device_init(struct host1x_device *device)
167{
168 struct host1x_client *client;
169 int err;
170
171 mutex_lock(&device->clients_lock);
172
173 list_for_each_entry(client, &device->clients, list) {
174 if (client->ops && client->ops->init) {
175 err = client->ops->init(client);
176 if (err < 0) {
177 dev_err(&device->dev,
178 "failed to initialize %s: %d\n",
179 dev_name(client->dev), err);
180 mutex_unlock(&device->clients_lock);
181 return err;
182 }
183 }
184 }
185
186 mutex_unlock(&device->clients_lock);
187
188 return 0;
189}
190
191int host1x_device_exit(struct host1x_device *device)
192{
193 struct host1x_client *client;
194 int err;
195
196 mutex_lock(&device->clients_lock);
197
198 list_for_each_entry_reverse(client, &device->clients, list) {
199 if (client->ops && client->ops->exit) {
200 err = client->ops->exit(client);
201 if (err < 0) {
202 dev_err(&device->dev,
203 "failed to cleanup %s: %d\n",
204 dev_name(client->dev), err);
205 mutex_unlock(&device->clients_lock);
206 return err;
207 }
208 }
209 }
210
211 mutex_unlock(&device->clients_lock);
212
213 return 0;
214}
215
216static int host1x_register_client(struct host1x *host1x,
217 struct host1x_client *client)
218{
219 struct host1x_device *device;
220 struct host1x_subdev *subdev;
221
222 mutex_lock(&host1x->devices_lock);
223
224 list_for_each_entry(device, &host1x->devices, list) {
225 list_for_each_entry(subdev, &device->subdevs, list) {
226 if (subdev->np == client->dev->of_node) {
227 host1x_subdev_register(device, subdev, client);
228 mutex_unlock(&host1x->devices_lock);
229 return 0;
230 }
231 }
232 }
233
234 mutex_unlock(&host1x->devices_lock);
235 return -ENODEV;
236}
237
238static int host1x_unregister_client(struct host1x *host1x,
239 struct host1x_client *client)
240{
241 struct host1x_device *device, *dt;
242 struct host1x_subdev *subdev;
243
244 mutex_lock(&host1x->devices_lock);
245
246 list_for_each_entry_safe(device, dt, &host1x->devices, list) {
247 list_for_each_entry(subdev, &device->active, list) {
248 if (subdev->client == client) {
249 host1x_subdev_unregister(device, subdev);
250 mutex_unlock(&host1x->devices_lock);
251 return 0;
252 }
253 }
254 }
255
256 mutex_unlock(&host1x->devices_lock);
257 return -ENODEV;
258}
259
260struct bus_type host1x_bus_type = {
261 .name = "host1x",
262};
263
264int host1x_bus_init(void)
265{
266 return bus_register(&host1x_bus_type);
267}
268
269void host1x_bus_exit(void)
270{
271 bus_unregister(&host1x_bus_type);
272}
273
274static void host1x_device_release(struct device *dev)
275{
276 struct host1x_device *device = to_host1x_device(dev);
277
278 kfree(device);
279}
280
281static int host1x_device_add(struct host1x *host1x,
282 struct host1x_driver *driver)
283{
284 struct host1x_client *client, *tmp;
285 struct host1x_subdev *subdev;
286 struct host1x_device *device;
287 int err;
288
289 device = kzalloc(sizeof(*device), GFP_KERNEL);
290 if (!device)
291 return -ENOMEM;
292
293 mutex_init(&device->subdevs_lock);
294 INIT_LIST_HEAD(&device->subdevs);
295 INIT_LIST_HEAD(&device->active);
296 mutex_init(&device->clients_lock);
297 INIT_LIST_HEAD(&device->clients);
298 INIT_LIST_HEAD(&device->list);
299 device->driver = driver;
300
301 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
302 device->dev.dma_mask = &device->dev.coherent_dma_mask;
303 device->dev.release = host1x_device_release;
304 dev_set_name(&device->dev, driver->name);
305 device->dev.bus = &host1x_bus_type;
306 device->dev.parent = host1x->dev;
307
308 err = device_register(&device->dev);
309 if (err < 0)
310 return err;
311
312 err = host1x_device_parse_dt(device);
313 if (err < 0) {
314 device_unregister(&device->dev);
315 return err;
316 }
317
318 mutex_lock(&host1x->devices_lock);
319 list_add_tail(&device->list, &host1x->devices);
320 mutex_unlock(&host1x->devices_lock);
321
322 mutex_lock(&clients_lock);
323
324 list_for_each_entry_safe(client, tmp, &clients, list) {
325 list_for_each_entry(subdev, &device->subdevs, list) {
326 if (subdev->np == client->dev->of_node) {
327 host1x_subdev_register(device, subdev, client);
328 break;
329 }
330 }
331 }
332
333 mutex_unlock(&clients_lock);
334
335 return 0;
336}
337
338/*
339 * Removes a device by first unregistering any subdevices and then removing
340 * itself from the list of devices.
341 *
342 * This function must be called with the host1x->devices_lock held.
343 */
344static void host1x_device_del(struct host1x *host1x,
345 struct host1x_device *device)
346{
347 struct host1x_subdev *subdev, *sd;
348 struct host1x_client *client, *cl;
349
350 mutex_lock(&device->subdevs_lock);
351
352 /* unregister subdevices */
353 list_for_each_entry_safe(subdev, sd, &device->active, list) {
354 /*
355 * host1x_subdev_unregister() will remove the client from
356 * any lists, so we'll need to manually add it back to the
357 * list of idle clients.
358 *
359 * XXX: Alternatively, perhaps don't remove the client from
360 * any lists in host1x_subdev_unregister() and instead do
361 * that explicitly from host1x_unregister_client()?
362 */
363 client = subdev->client;
364
365 __host1x_subdev_unregister(device, subdev);
366
367 /* add the client to the list of idle clients */
368 mutex_lock(&clients_lock);
369 list_add_tail(&client->list, &clients);
370 mutex_unlock(&clients_lock);
371 }
372
373 /* remove subdevices */
374 list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
375 host1x_subdev_del(subdev);
376
377 mutex_unlock(&device->subdevs_lock);
378
379 /* move clients to idle list */
380 mutex_lock(&clients_lock);
381 mutex_lock(&device->clients_lock);
382
383 list_for_each_entry_safe(client, cl, &device->clients, list)
384 list_move_tail(&client->list, &clients);
385
386 mutex_unlock(&device->clients_lock);
387 mutex_unlock(&clients_lock);
388
389 /* finally remove the device */
390 list_del_init(&device->list);
391 device_unregister(&device->dev);
392}
393
394static void host1x_attach_driver(struct host1x *host1x,
395 struct host1x_driver *driver)
396{
397 struct host1x_device *device;
398 int err;
399
400 mutex_lock(&host1x->devices_lock);
401
402 list_for_each_entry(device, &host1x->devices, list) {
403 if (device->driver == driver) {
404 mutex_unlock(&host1x->devices_lock);
405 return;
406 }
407 }
408
409 mutex_unlock(&host1x->devices_lock);
410
411 err = host1x_device_add(host1x, driver);
412 if (err < 0)
413 dev_err(host1x->dev, "failed to allocate device: %d\n", err);
414}
415
416static void host1x_detach_driver(struct host1x *host1x,
417 struct host1x_driver *driver)
418{
419 struct host1x_device *device, *tmp;
420
421 mutex_lock(&host1x->devices_lock);
422
423 list_for_each_entry_safe(device, tmp, &host1x->devices, list)
424 if (device->driver == driver)
425 host1x_device_del(host1x, device);
426
427 mutex_unlock(&host1x->devices_lock);
428}
429
430int host1x_register(struct host1x *host1x)
431{
432 struct host1x_driver *driver;
433
434 mutex_lock(&devices_lock);
435 list_add_tail(&host1x->list, &devices);
436 mutex_unlock(&devices_lock);
437
438 mutex_lock(&drivers_lock);
439
440 list_for_each_entry(driver, &drivers, list)
441 host1x_attach_driver(host1x, driver);
442
443 mutex_unlock(&drivers_lock);
444
445 return 0;
446}
447
448int host1x_unregister(struct host1x *host1x)
449{
450 struct host1x_driver *driver;
451
452 mutex_lock(&drivers_lock);
453
454 list_for_each_entry(driver, &drivers, list)
455 host1x_detach_driver(host1x, driver);
456
457 mutex_unlock(&drivers_lock);
458
459 mutex_lock(&devices_lock);
460 list_del_init(&host1x->list);
461 mutex_unlock(&devices_lock);
462
463 return 0;
464}
465
466int host1x_driver_register(struct host1x_driver *driver)
467{
468 struct host1x *host1x;
469
470 INIT_LIST_HEAD(&driver->list);
471
472 mutex_lock(&drivers_lock);
473 list_add_tail(&driver->list, &drivers);
474 mutex_unlock(&drivers_lock);
475
476 mutex_lock(&devices_lock);
477
478 list_for_each_entry(host1x, &devices, list)
479 host1x_attach_driver(host1x, driver);
480
481 mutex_unlock(&devices_lock);
482
483 return 0;
484}
485EXPORT_SYMBOL(host1x_driver_register);
486
487void host1x_driver_unregister(struct host1x_driver *driver)
488{
489 mutex_lock(&drivers_lock);
490 list_del_init(&driver->list);
491 mutex_unlock(&drivers_lock);
492}
493EXPORT_SYMBOL(host1x_driver_unregister);
494
495int host1x_client_register(struct host1x_client *client)
496{
497 struct host1x *host1x;
498 int err;
499
500 mutex_lock(&devices_lock);
501
502 list_for_each_entry(host1x, &devices, list) {
503 err = host1x_register_client(host1x, client);
504 if (!err) {
505 mutex_unlock(&devices_lock);
506 return 0;
507 }
508 }
509
510 mutex_unlock(&devices_lock);
511
512 mutex_lock(&clients_lock);
513 list_add_tail(&client->list, &clients);
514 mutex_unlock(&clients_lock);
515
516 return 0;
517}
518EXPORT_SYMBOL(host1x_client_register);
519
520int host1x_client_unregister(struct host1x_client *client)
521{
522 struct host1x_client *c;
523 struct host1x *host1x;
524 int err;
525
526 mutex_lock(&devices_lock);
527
528 list_for_each_entry(host1x, &devices, list) {
529 err = host1x_unregister_client(host1x, client);
530 if (!err) {
531 mutex_unlock(&devices_lock);
532 return 0;
533 }
534 }
535
536 mutex_unlock(&devices_lock);
537 mutex_lock(&clients_lock);
538
539 list_for_each_entry(c, &clients, list) {
540 if (c == client) {
541 list_del_init(&c->list);
542 break;
543 }
544 }
545
546 mutex_unlock(&clients_lock);
547
548 return 0;
549}
550EXPORT_SYMBOL(host1x_client_unregister);
diff --git a/drivers/gpu/host1x/host1x_client.h b/drivers/gpu/host1x/bus.h
index 9b85f10f4a44..4099e99212c8 100644
--- a/drivers/gpu/host1x/host1x_client.h
+++ b/drivers/gpu/host1x/bus.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2013, NVIDIA Corporation. 2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012-2013, NVIDIA Corporation
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
@@ -14,22 +15,15 @@
14 * along with this program. If not, see <http://www.gnu.org/licenses/>. 15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */ 16 */
16 17
17#ifndef HOST1X_CLIENT_H 18#ifndef HOST1X_BUS_H
18#define HOST1X_CLIENT_H 19#define HOST1X_BUS_H
19 20
20struct device; 21struct host1x;
21struct platform_device;
22 22
23#ifdef CONFIG_DRM_TEGRA 23int host1x_bus_init(void);
24int host1x_drm_alloc(struct platform_device *pdev); 24void host1x_bus_exit(void);
25#else
26static inline int host1x_drm_alloc(struct platform_device *pdev)
27{
28 return 0;
29}
30#endif
31 25
32void host1x_set_drm_data(struct device *dev, void *data); 26int host1x_register(struct host1x *host1x);
33void *host1x_get_drm_data(struct device *dev); 27int host1x_unregister(struct host1x *host1x);
34 28
35#endif 29#endif
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index de72172d3b5f..3995255b16c7 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -20,6 +20,7 @@
20#include <asm/cacheflush.h> 20#include <asm/cacheflush.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <linux/host1x.h>
23#include <linux/interrupt.h> 24#include <linux/interrupt.h>
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/kfifo.h> 26#include <linux/kfifo.h>
@@ -30,7 +31,6 @@
30#include "channel.h" 31#include "channel.h"
31#include "dev.h" 32#include "dev.h"
32#include "debug.h" 33#include "debug.h"
33#include "host1x_bo.h"
34#include "job.h" 34#include "job.h"
35 35
36/* 36/*
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
index 48723b8eea42..df767cf90d51 100644
--- a/drivers/gpu/host1x/channel.h
+++ b/drivers/gpu/host1x/channel.h
@@ -40,12 +40,6 @@ struct host1x_channel {
40/* channel list operations */ 40/* channel list operations */
41int host1x_channel_list_init(struct host1x *host); 41int host1x_channel_list_init(struct host1x *host);
42 42
43struct host1x_channel *host1x_channel_request(struct device *dev);
44void host1x_channel_free(struct host1x_channel *channel);
45struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
46void host1x_channel_put(struct host1x_channel *channel);
47int host1x_job_submit(struct host1x_job *job);
48
49#define host1x_for_each_channel(host, channel) \ 43#define host1x_for_each_channel(host, channel) \
50 list_for_each_entry(channel, &host->chlist.list, list) 44 list_for_each_entry(channel, &host->chlist.list, list)
51 45
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 471630299878..80da003d63de 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -27,24 +27,13 @@
27#define CREATE_TRACE_POINTS 27#define CREATE_TRACE_POINTS
28#include <trace/events/host1x.h> 28#include <trace/events/host1x.h>
29 29
30#include "bus.h"
30#include "dev.h" 31#include "dev.h"
31#include "intr.h" 32#include "intr.h"
32#include "channel.h" 33#include "channel.h"
33#include "debug.h" 34#include "debug.h"
34#include "hw/host1x01.h" 35#include "hw/host1x01.h"
35#include "host1x_client.h" 36#include "hw/host1x02.h"
36
37void host1x_set_drm_data(struct device *dev, void *data)
38{
39 struct host1x *host1x = dev_get_drvdata(dev);
40 host1x->drm_data = data;
41}
42
43void *host1x_get_drm_data(struct device *dev)
44{
45 struct host1x *host1x = dev_get_drvdata(dev);
46 return host1x ? host1x->drm_data : NULL;
47}
48 37
49void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r) 38void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
50{ 39{
@@ -79,7 +68,17 @@ static const struct host1x_info host1x01_info = {
79 .sync_offset = 0x3000, 68 .sync_offset = 0x3000,
80}; 69};
81 70
71static const struct host1x_info host1x02_info = {
72 .nb_channels = 9,
73 .nb_pts = 32,
74 .nb_mlocks = 16,
75 .nb_bases = 12,
76 .init = host1x02_init,
77 .sync_offset = 0x3000,
78};
79
82static struct of_device_id host1x_of_match[] = { 80static struct of_device_id host1x_of_match[] = {
81 { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
83 { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, }, 82 { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
84 { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, }, 83 { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
85 { }, 84 { },
@@ -114,6 +113,9 @@ static int host1x_probe(struct platform_device *pdev)
114 if (!host) 113 if (!host)
115 return -ENOMEM; 114 return -ENOMEM;
116 115
116 mutex_init(&host->devices_lock);
117 INIT_LIST_HEAD(&host->devices);
118 INIT_LIST_HEAD(&host->list);
117 host->dev = &pdev->dev; 119 host->dev = &pdev->dev;
118 host->info = id->data; 120 host->info = id->data;
119 121
@@ -152,7 +154,7 @@ static int host1x_probe(struct platform_device *pdev)
152 err = host1x_syncpt_init(host); 154 err = host1x_syncpt_init(host);
153 if (err) { 155 if (err) {
154 dev_err(&pdev->dev, "failed to initialize syncpts\n"); 156 dev_err(&pdev->dev, "failed to initialize syncpts\n");
155 return err; 157 goto fail_unprepare_disable;
156 } 158 }
157 159
158 err = host1x_intr_init(host, syncpt_irq); 160 err = host1x_intr_init(host, syncpt_irq);
@@ -163,19 +165,26 @@ static int host1x_probe(struct platform_device *pdev)
163 165
164 host1x_debug_init(host); 166 host1x_debug_init(host);
165 167
166 host1x_drm_alloc(pdev); 168 err = host1x_register(host);
169 if (err < 0)
170 goto fail_deinit_intr;
167 171
168 return 0; 172 return 0;
169 173
174fail_deinit_intr:
175 host1x_intr_deinit(host);
170fail_deinit_syncpt: 176fail_deinit_syncpt:
171 host1x_syncpt_deinit(host); 177 host1x_syncpt_deinit(host);
178fail_unprepare_disable:
179 clk_disable_unprepare(host->clk);
172 return err; 180 return err;
173} 181}
174 182
175static int __exit host1x_remove(struct platform_device *pdev) 183static int host1x_remove(struct platform_device *pdev)
176{ 184{
177 struct host1x *host = platform_get_drvdata(pdev); 185 struct host1x *host = platform_get_drvdata(pdev);
178 186
187 host1x_unregister(host);
179 host1x_intr_deinit(host); 188 host1x_intr_deinit(host);
180 host1x_syncpt_deinit(host); 189 host1x_syncpt_deinit(host);
181 clk_disable_unprepare(host->clk); 190 clk_disable_unprepare(host->clk);
@@ -184,59 +193,36 @@ static int __exit host1x_remove(struct platform_device *pdev)
184} 193}
185 194
186static struct platform_driver tegra_host1x_driver = { 195static struct platform_driver tegra_host1x_driver = {
187 .probe = host1x_probe,
188 .remove = __exit_p(host1x_remove),
189 .driver = { 196 .driver = {
190 .owner = THIS_MODULE,
191 .name = "tegra-host1x", 197 .name = "tegra-host1x",
192 .of_match_table = host1x_of_match, 198 .of_match_table = host1x_of_match,
193 }, 199 },
200 .probe = host1x_probe,
201 .remove = host1x_remove,
194}; 202};
195 203
196static int __init tegra_host1x_init(void) 204static int __init tegra_host1x_init(void)
197{ 205{
198 int err; 206 int err;
199 207
200 err = platform_driver_register(&tegra_host1x_driver); 208 err = host1x_bus_init();
201 if (err < 0) 209 if (err < 0)
202 return err; 210 return err;
203 211
204#ifdef CONFIG_DRM_TEGRA 212 err = platform_driver_register(&tegra_host1x_driver);
205 err = platform_driver_register(&tegra_dc_driver); 213 if (err < 0) {
206 if (err < 0) 214 host1x_bus_exit();
207 goto unregister_host1x; 215 return err;
208 216 }
209 err = platform_driver_register(&tegra_hdmi_driver);
210 if (err < 0)
211 goto unregister_dc;
212
213 err = platform_driver_register(&tegra_gr2d_driver);
214 if (err < 0)
215 goto unregister_hdmi;
216#endif
217 217
218 return 0; 218 return 0;
219
220#ifdef CONFIG_DRM_TEGRA
221unregister_hdmi:
222 platform_driver_unregister(&tegra_hdmi_driver);
223unregister_dc:
224 platform_driver_unregister(&tegra_dc_driver);
225unregister_host1x:
226 platform_driver_unregister(&tegra_host1x_driver);
227 return err;
228#endif
229} 219}
230module_init(tegra_host1x_init); 220module_init(tegra_host1x_init);
231 221
232static void __exit tegra_host1x_exit(void) 222static void __exit tegra_host1x_exit(void)
233{ 223{
234#ifdef CONFIG_DRM_TEGRA
235 platform_driver_unregister(&tegra_gr2d_driver);
236 platform_driver_unregister(&tegra_hdmi_driver);
237 platform_driver_unregister(&tegra_dc_driver);
238#endif
239 platform_driver_unregister(&tegra_host1x_driver); 224 platform_driver_unregister(&tegra_host1x_driver);
225 host1x_bus_exit();
240} 226}
241module_exit(tegra_host1x_exit); 227module_exit(tegra_host1x_exit);
242 228
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index bed90a8131be..a61a976e7a42 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -27,6 +27,7 @@
27#include "job.h" 27#include "job.h"
28 28
29struct host1x_syncpt; 29struct host1x_syncpt;
30struct host1x_syncpt_base;
30struct host1x_channel; 31struct host1x_channel;
31struct host1x_cdma; 32struct host1x_cdma;
32struct host1x_job; 33struct host1x_job;
@@ -102,6 +103,7 @@ struct host1x {
102 103
103 void __iomem *regs; 104 void __iomem *regs;
104 struct host1x_syncpt *syncpt; 105 struct host1x_syncpt *syncpt;
106 struct host1x_syncpt_base *bases;
105 struct device *dev; 107 struct device *dev;
106 struct clk *clk; 108 struct clk *clk;
107 109
@@ -125,7 +127,10 @@ struct host1x {
125 127
126 struct dentry *debugfs; 128 struct dentry *debugfs;
127 129
128 void *drm_data; 130 struct mutex devices_lock;
131 struct list_head devices;
132
133 struct list_head list;
129}; 134};
130 135
131void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v); 136void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
@@ -301,8 +306,4 @@ static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
301 host->debug_op->show_mlocks(host, o); 306 host->debug_op->show_mlocks(host, o);
302} 307}
303 308
304extern struct platform_driver tegra_dc_driver;
305extern struct platform_driver tegra_hdmi_driver;
306extern struct platform_driver tegra_gr2d_driver;
307
308#endif 309#endif
diff --git a/drivers/gpu/host1x/drm/gr2d.c b/drivers/gpu/host1x/drm/gr2d.c
deleted file mode 100644
index 27ffcf15a4b4..000000000000
--- a/drivers/gpu/host1x/drm/gr2d.c
+++ /dev/null
@@ -1,343 +0,0 @@
1/*
2 * drivers/video/tegra/host/gr2d/gr2d.c
3 *
4 * Tegra Graphics 2D
5 *
6 * Copyright (c) 2012-2013, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/export.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/clk.h>
25
26#include "channel.h"
27#include "drm.h"
28#include "gem.h"
29#include "job.h"
30#include "host1x.h"
31#include "host1x_bo.h"
32#include "host1x_client.h"
33#include "syncpt.h"
34
35struct gr2d {
36 struct host1x_client client;
37 struct clk *clk;
38 struct host1x_channel *channel;
39 unsigned long *addr_regs;
40};
41
42static inline struct gr2d *to_gr2d(struct host1x_client *client)
43{
44 return container_of(client, struct gr2d, client);
45}
46
47static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg);
48
49static int gr2d_client_init(struct host1x_client *client,
50 struct drm_device *drm)
51{
52 return 0;
53}
54
55static int gr2d_client_exit(struct host1x_client *client)
56{
57 return 0;
58}
59
60static int gr2d_open_channel(struct host1x_client *client,
61 struct host1x_drm_context *context)
62{
63 struct gr2d *gr2d = to_gr2d(client);
64
65 context->channel = host1x_channel_get(gr2d->channel);
66
67 if (!context->channel)
68 return -ENOMEM;
69
70 return 0;
71}
72
73static void gr2d_close_channel(struct host1x_drm_context *context)
74{
75 host1x_channel_put(context->channel);
76}
77
78static struct host1x_bo *host1x_bo_lookup(struct drm_device *drm,
79 struct drm_file *file,
80 u32 handle)
81{
82 struct drm_gem_object *gem;
83 struct tegra_bo *bo;
84
85 gem = drm_gem_object_lookup(drm, file, handle);
86 if (!gem)
87 return NULL;
88
89 mutex_lock(&drm->struct_mutex);
90 drm_gem_object_unreference(gem);
91 mutex_unlock(&drm->struct_mutex);
92
93 bo = to_tegra_bo(gem);
94 return &bo->base;
95}
96
97static int gr2d_submit(struct host1x_drm_context *context,
98 struct drm_tegra_submit *args, struct drm_device *drm,
99 struct drm_file *file)
100{
101 struct host1x_job *job;
102 unsigned int num_cmdbufs = args->num_cmdbufs;
103 unsigned int num_relocs = args->num_relocs;
104 unsigned int num_waitchks = args->num_waitchks;
105 struct drm_tegra_cmdbuf __user *cmdbufs =
106 (void * __user)(uintptr_t)args->cmdbufs;
107 struct drm_tegra_reloc __user *relocs =
108 (void * __user)(uintptr_t)args->relocs;
109 struct drm_tegra_waitchk __user *waitchks =
110 (void * __user)(uintptr_t)args->waitchks;
111 struct drm_tegra_syncpt syncpt;
112 int err;
113
114 /* We don't yet support other than one syncpt_incr struct per submit */
115 if (args->num_syncpts != 1)
116 return -EINVAL;
117
118 job = host1x_job_alloc(context->channel, args->num_cmdbufs,
119 args->num_relocs, args->num_waitchks);
120 if (!job)
121 return -ENOMEM;
122
123 job->num_relocs = args->num_relocs;
124 job->num_waitchk = args->num_waitchks;
125 job->client = (u32)args->context;
126 job->class = context->client->class;
127 job->serialize = true;
128
129 while (num_cmdbufs) {
130 struct drm_tegra_cmdbuf cmdbuf;
131 struct host1x_bo *bo;
132
133 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
134 if (err)
135 goto fail;
136
137 bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
138 if (!bo) {
139 err = -ENOENT;
140 goto fail;
141 }
142
143 host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
144 num_cmdbufs--;
145 cmdbufs++;
146 }
147
148 err = copy_from_user(job->relocarray, relocs,
149 sizeof(*relocs) * num_relocs);
150 if (err)
151 goto fail;
152
153 while (num_relocs--) {
154 struct host1x_reloc *reloc = &job->relocarray[num_relocs];
155 struct host1x_bo *cmdbuf, *target;
156
157 cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
158 target = host1x_bo_lookup(drm, file, (u32)reloc->target);
159
160 reloc->cmdbuf = cmdbuf;
161 reloc->target = target;
162
163 if (!reloc->target || !reloc->cmdbuf) {
164 err = -ENOENT;
165 goto fail;
166 }
167 }
168
169 err = copy_from_user(job->waitchk, waitchks,
170 sizeof(*waitchks) * num_waitchks);
171 if (err)
172 goto fail;
173
174 err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
175 sizeof(syncpt));
176 if (err)
177 goto fail;
178
179 job->syncpt_id = syncpt.id;
180 job->syncpt_incrs = syncpt.incrs;
181 job->timeout = 10000;
182 job->is_addr_reg = gr2d_is_addr_reg;
183
184 if (args->timeout && args->timeout < 10000)
185 job->timeout = args->timeout;
186
187 err = host1x_job_pin(job, context->client->dev);
188 if (err)
189 goto fail;
190
191 err = host1x_job_submit(job);
192 if (err)
193 goto fail_submit;
194
195 args->fence = job->syncpt_end;
196
197 host1x_job_put(job);
198 return 0;
199
200fail_submit:
201 host1x_job_unpin(job);
202fail:
203 host1x_job_put(job);
204 return err;
205}
206
207static struct host1x_client_ops gr2d_client_ops = {
208 .drm_init = gr2d_client_init,
209 .drm_exit = gr2d_client_exit,
210 .open_channel = gr2d_open_channel,
211 .close_channel = gr2d_close_channel,
212 .submit = gr2d_submit,
213};
214
215static void gr2d_init_addr_reg_map(struct device *dev, struct gr2d *gr2d)
216{
217 const u32 gr2d_addr_regs[] = {0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d, 0x31,
218 0x32, 0x48, 0x49, 0x4a, 0x4b, 0x4c};
219 unsigned long *bitmap;
220 int i;
221
222 bitmap = devm_kzalloc(dev, DIV_ROUND_UP(256, BITS_PER_BYTE),
223 GFP_KERNEL);
224
225 for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); ++i) {
226 u32 reg = gr2d_addr_regs[i];
227 bitmap[BIT_WORD(reg)] |= BIT_MASK(reg);
228 }
229
230 gr2d->addr_regs = bitmap;
231}
232
233static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg)
234{
235 struct gr2d *gr2d = dev_get_drvdata(dev);
236
237 switch (class) {
238 case HOST1X_CLASS_HOST1X:
239 return reg == 0x2b;
240 case HOST1X_CLASS_GR2D:
241 case HOST1X_CLASS_GR2D_SB:
242 reg &= 0xff;
243 if (gr2d->addr_regs[BIT_WORD(reg)] & BIT_MASK(reg))
244 return 1;
245 default:
246 return 0;
247 }
248}
249
250static const struct of_device_id gr2d_match[] = {
251 { .compatible = "nvidia,tegra30-gr2d" },
252 { .compatible = "nvidia,tegra20-gr2d" },
253 { },
254};
255
256static int gr2d_probe(struct platform_device *pdev)
257{
258 struct device *dev = &pdev->dev;
259 struct host1x_drm *host1x = host1x_get_drm_data(dev->parent);
260 int err;
261 struct gr2d *gr2d = NULL;
262 struct host1x_syncpt **syncpts;
263
264 gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
265 if (!gr2d)
266 return -ENOMEM;
267
268 syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
269 if (!syncpts)
270 return -ENOMEM;
271
272 gr2d->clk = devm_clk_get(dev, NULL);
273 if (IS_ERR(gr2d->clk)) {
274 dev_err(dev, "cannot get clock\n");
275 return PTR_ERR(gr2d->clk);
276 }
277
278 err = clk_prepare_enable(gr2d->clk);
279 if (err) {
280 dev_err(dev, "cannot turn on clock\n");
281 return err;
282 }
283
284 gr2d->channel = host1x_channel_request(dev);
285 if (!gr2d->channel)
286 return -ENOMEM;
287
288 *syncpts = host1x_syncpt_request(dev, false);
289 if (!(*syncpts)) {
290 host1x_channel_free(gr2d->channel);
291 return -ENOMEM;
292 }
293
294 gr2d->client.ops = &gr2d_client_ops;
295 gr2d->client.dev = dev;
296 gr2d->client.class = HOST1X_CLASS_GR2D;
297 gr2d->client.syncpts = syncpts;
298 gr2d->client.num_syncpts = 1;
299
300 err = host1x_register_client(host1x, &gr2d->client);
301 if (err < 0) {
302 dev_err(dev, "failed to register host1x client: %d\n", err);
303 return err;
304 }
305
306 gr2d_init_addr_reg_map(dev, gr2d);
307
308 platform_set_drvdata(pdev, gr2d);
309
310 return 0;
311}
312
313static int __exit gr2d_remove(struct platform_device *pdev)
314{
315 struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
316 struct gr2d *gr2d = platform_get_drvdata(pdev);
317 unsigned int i;
318 int err;
319
320 err = host1x_unregister_client(host1x, &gr2d->client);
321 if (err < 0) {
322 dev_err(&pdev->dev, "failed to unregister client: %d\n", err);
323 return err;
324 }
325
326 for (i = 0; i < gr2d->client.num_syncpts; i++)
327 host1x_syncpt_free(gr2d->client.syncpts[i]);
328
329 host1x_channel_free(gr2d->channel);
330 clk_disable_unprepare(gr2d->clk);
331
332 return 0;
333}
334
335struct platform_driver tegra_gr2d_driver = {
336 .probe = gr2d_probe,
337 .remove = __exit_p(gr2d_remove),
338 .driver = {
339 .owner = THIS_MODULE,
340 .name = "gr2d",
341 .of_match_table = gr2d_match,
342 }
343};
diff --git a/drivers/gpu/host1x/host1x.h b/drivers/gpu/host1x/host1x.h
deleted file mode 100644
index a2bc1e65e972..000000000000
--- a/drivers/gpu/host1x/host1x.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * Tegra host1x driver
3 *
4 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#ifndef __LINUX_HOST1X_H
22#define __LINUX_HOST1X_H
23
24enum host1x_class {
25 HOST1X_CLASS_HOST1X = 0x1,
26 HOST1X_CLASS_GR2D = 0x51,
27 HOST1X_CLASS_GR2D_SB = 0x52
28};
29
30#endif
diff --git a/drivers/gpu/host1x/host1x_bo.h b/drivers/gpu/host1x/host1x_bo.h
deleted file mode 100644
index 4c1f10bd773d..000000000000
--- a/drivers/gpu/host1x/host1x_bo.h
+++ /dev/null
@@ -1,87 +0,0 @@
1/*
2 * Tegra host1x Memory Management Abstraction header
3 *
4 * Copyright (c) 2012-2013, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef _HOST1X_BO_H
20#define _HOST1X_BO_H
21
22struct host1x_bo;
23
24struct host1x_bo_ops {
25 struct host1x_bo *(*get)(struct host1x_bo *bo);
26 void (*put)(struct host1x_bo *bo);
27 dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
28 void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
29 void *(*mmap)(struct host1x_bo *bo);
30 void (*munmap)(struct host1x_bo *bo, void *addr);
31 void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
32 void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
33};
34
35struct host1x_bo {
36 const struct host1x_bo_ops *ops;
37};
38
39static inline void host1x_bo_init(struct host1x_bo *bo,
40 const struct host1x_bo_ops *ops)
41{
42 bo->ops = ops;
43}
44
45static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
46{
47 return bo->ops->get(bo);
48}
49
50static inline void host1x_bo_put(struct host1x_bo *bo)
51{
52 bo->ops->put(bo);
53}
54
55static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
56 struct sg_table **sgt)
57{
58 return bo->ops->pin(bo, sgt);
59}
60
61static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
62{
63 bo->ops->unpin(bo, sgt);
64}
65
66static inline void *host1x_bo_mmap(struct host1x_bo *bo)
67{
68 return bo->ops->mmap(bo);
69}
70
71static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
72{
73 bo->ops->munmap(bo, addr);
74}
75
76static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
77{
78 return bo->ops->kmap(bo, pagenum);
79}
80
81static inline void host1x_bo_kunmap(struct host1x_bo *bo,
82 unsigned int pagenum, void *addr)
83{
84 bo->ops->kunmap(bo, pagenum, addr);
85}
86
87#endif
diff --git a/drivers/gpu/host1x/hw/Makefile b/drivers/gpu/host1x/hw/Makefile
deleted file mode 100644
index 9b50863a2236..000000000000
--- a/drivers/gpu/host1x/hw/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
1ccflags-y = -Idrivers/gpu/host1x
2
3host1x-hw-objs = \
4 host1x01.o
5
6obj-$(CONFIG_TEGRA_HOST1X) += host1x-hw.o
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 2ee4ad55c4db..37e2a63241a9 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -20,10 +20,10 @@
20#include <linux/scatterlist.h> 20#include <linux/scatterlist.h>
21#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
22 22
23#include "cdma.h" 23#include "../cdma.h"
24#include "channel.h" 24#include "../channel.h"
25#include "dev.h" 25#include "../dev.h"
26#include "debug.h" 26#include "../debug.h"
27 27
28/* 28/*
29 * Put the restart at the end of pushbuffer memor 29 * Put the restart at the end of pushbuffer memor
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index ee199623e365..4608257ab656 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -16,15 +16,15 @@
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18 18
19#include <linux/host1x.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
21
20#include <trace/events/host1x.h> 22#include <trace/events/host1x.h>
21 23
22#include "host1x.h" 24#include "../channel.h"
23#include "host1x_bo.h" 25#include "../dev.h"
24#include "channel.h" 26#include "../intr.h"
25#include "dev.h" 27#include "../job.h"
26#include "intr.h"
27#include "job.h"
28 28
29#define HOST1X_CHANNEL_SIZE 16384 29#define HOST1X_CHANNEL_SIZE 16384
30#define TRACE_MAX_LENGTH 128U 30#define TRACE_MAX_LENGTH 128U
@@ -67,6 +67,22 @@ static void submit_gathers(struct host1x_job *job)
67 } 67 }
68} 68}
69 69
/*
 * synchronize_syncpt_base() - push a LOAD_SYNCPT_BASE opcode so that the
 * syncpoint's base register is loaded with the current max syncpoint
 * value, allowing later commands to wait relative to the base.
 *
 * The caller (channel_submit) only invokes this when sp->base is
 * non-NULL, so the sp->base->id dereference below is safe.
 */
70static inline void synchronize_syncpt_base(struct host1x_job *job)
71{
72 struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
73 struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
74 u32 id, value;
75
76 value = host1x_syncpt_read_max(sp);
77 id = sp->base->id;
78
 /* One-word write to LOAD_SYNCPT_BASE within the HOST1X class. */
79 host1x_cdma_push(&job->channel->cdma,
80 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
81 HOST1X_UCLASS_LOAD_SYNCPT_BASE, 1),
82 HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(id) |
83 HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(value));
84}
85
70static int channel_submit(struct host1x_job *job) 86static int channel_submit(struct host1x_job *job)
71{ 87{
72 struct host1x_channel *ch = job->channel; 88 struct host1x_channel *ch = job->channel;
@@ -118,6 +134,10 @@ static int channel_submit(struct host1x_job *job)
118 host1x_syncpt_read_max(sp))); 134 host1x_syncpt_read_max(sp)));
119 } 135 }
120 136
137 /* Synchronize base register to allow using it for relative waiting */
138 if (sp->base)
139 synchronize_syncpt_base(job);
140
121 syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs); 141 syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
122 142
123 job->syncpt_end = syncval; 143 job->syncpt_end = syncval;
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 334c038052f5..640c75ca5a8b 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -15,18 +15,10 @@
15 * 15 *
16 */ 16 */
17 17
18#include <linux/debugfs.h> 18#include "../dev.h"
19#include <linux/seq_file.h> 19#include "../debug.h"
20#include <linux/mm.h> 20#include "../cdma.h"
21#include <linux/scatterlist.h> 21#include "../channel.h"
22
23#include <linux/io.h>
24
25#include "dev.h"
26#include "debug.h"
27#include "cdma.h"
28#include "channel.h"
29#include "host1x_bo.h"
30 22
31#define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400 23#define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400
32 24
diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c
index a14e91cd1e58..859b73beb4d0 100644
--- a/drivers/gpu/host1x/hw/host1x01.c
+++ b/drivers/gpu/host1x/hw/host1x01.c
@@ -17,17 +17,17 @@
17 */ 17 */
18 18
19/* include hw specification */ 19/* include hw specification */
20#include "hw/host1x01.h" 20#include "host1x01.h"
21#include "hw/host1x01_hardware.h" 21#include "host1x01_hardware.h"
22 22
23/* include code */ 23/* include code */
24#include "hw/cdma_hw.c" 24#include "cdma_hw.c"
25#include "hw/channel_hw.c" 25#include "channel_hw.c"
26#include "hw/debug_hw.c" 26#include "debug_hw.c"
27#include "hw/intr_hw.c" 27#include "intr_hw.c"
28#include "hw/syncpt_hw.c" 28#include "syncpt_hw.c"
29 29
30#include "dev.h" 30#include "../dev.h"
31 31
32int host1x01_init(struct host1x *host) 32int host1x01_init(struct host1x *host)
33{ 33{
diff --git a/drivers/gpu/host1x/hw/host1x02.c b/drivers/gpu/host1x/hw/host1x02.c
new file mode 100644
index 000000000000..e98caca0ca42
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x02.c
@@ -0,0 +1,42 @@
1/*
2 * Host1x init for Tegra114 SoCs
3 *
4 * Copyright (c) 2013 NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
/*
 * NOTE(review): this host1x02 (Tegra114) init reuses the host1x01
 * register specification headers even though hw_host1x02_*.h files are
 * added by the same change — presumably the register layouts are
 * compatible; confirm this is intentional and not a copy-paste slip.
 */
19/* include hw specification */
20#include "host1x01.h"
21#include "host1x01_hardware.h"
22
23/* include code */
24#include "cdma_hw.c"
25#include "channel_hw.c"
26#include "debug_hw.c"
27#include "intr_hw.c"
28#include "syncpt_hw.c"
29
30#include "../dev.h"
31
/*
 * host1x02_init() - install the hardware-specific operation tables on a
 * host1x02 instance. Always returns 0.
 */
32int host1x02_init(struct host1x *host)
33{
34 host->channel_op = &host1x_channel_ops;
35 host->cdma_op = &host1x_cdma_ops;
36 host->cdma_pb_op = &host1x_pushbuffer_ops;
37 host->syncpt_op = &host1x_syncpt_ops;
38 host->intr_op = &host1x_intr_ops;
39 host->debug_op = &host1x_debug_ops;
40
41 return 0;
42}
diff --git a/drivers/gpu/host1x/hw/host1x02.h b/drivers/gpu/host1x/hw/host1x02.h
new file mode 100644
index 000000000000..f7486609a90e
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x02.h
@@ -0,0 +1,26 @@
1/*
2 * Host1x init for Tegra114 SoCs
3 *
4 * Copyright (c) 2013 NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef HOST1X_HOST1X02_H
20#define HOST1X_HOST1X02_H
21
22struct host1x;
23
/* Install the host1x02 (Tegra114) hardware op tables; returns 0. */
24int host1x02_init(struct host1x *host);
25
26#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
index 42f3ce19ca32..f7553599ee27 100644
--- a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
@@ -111,6 +111,12 @@ static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
111} 111}
112#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \ 112#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
113 host1x_uclass_wait_syncpt_base_offset_f(v) 113 host1x_uclass_wait_syncpt_base_offset_f(v)
114static inline u32 host1x_uclass_load_syncpt_base_r(void)
115{
116 return 0xb;
117}
118#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
119 host1x_uclass_load_syncpt_base_r()
114static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v) 120static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
115{ 121{
116 return (v & 0xff) << 24; 122 return (v & 0xff) << 24;
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_channel.h b/drivers/gpu/host1x/hw/hw_host1x02_channel.h
new file mode 100644
index 000000000000..e490bcde33fe
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_channel.h
@@ -0,0 +1,121 @@
1/*
2 * Copyright (c) 2013 NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef HOST1X_HW_HOST1X02_CHANNEL_H
52#define HOST1X_HW_HOST1X02_CHANNEL_H
53
/*
 * Accessors for the per-channel register block (offsets and field
 * positions are hardware-defined; see the naming scheme above).
 * Generated-style code: do not hand-edit the numeric values.
 */
54static inline u32 host1x_channel_fifostat_r(void)
55{
56 return 0x0;
57}
58#define HOST1X_CHANNEL_FIFOSTAT \
59 host1x_channel_fifostat_r()
60static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
61{
62 return (r >> 11) & 0x1;
63}
64#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
65 host1x_channel_fifostat_cfempty_v(r)
66static inline u32 host1x_channel_dmastart_r(void)
67{
68 return 0x14;
69}
70#define HOST1X_CHANNEL_DMASTART \
71 host1x_channel_dmastart_r()
72static inline u32 host1x_channel_dmaput_r(void)
73{
74 return 0x18;
75}
76#define HOST1X_CHANNEL_DMAPUT \
77 host1x_channel_dmaput_r()
78static inline u32 host1x_channel_dmaget_r(void)
79{
80 return 0x1c;
81}
82#define HOST1X_CHANNEL_DMAGET \
83 host1x_channel_dmaget_r()
84static inline u32 host1x_channel_dmaend_r(void)
85{
86 return 0x20;
87}
88#define HOST1X_CHANNEL_DMAEND \
89 host1x_channel_dmaend_r()
90static inline u32 host1x_channel_dmactrl_r(void)
91{
92 return 0x24;
93}
94#define HOST1X_CHANNEL_DMACTRL \
95 host1x_channel_dmactrl_r()
96static inline u32 host1x_channel_dmactrl_dmastop(void)
97{
98 return 1 << 0;
99}
100#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
101 host1x_channel_dmactrl_dmastop()
102static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
103{
104 return (r >> 0) & 0x1;
105}
106#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
107 host1x_channel_dmactrl_dmastop_v(r)
108static inline u32 host1x_channel_dmactrl_dmagetrst(void)
109{
110 return 1 << 1;
111}
112#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
113 host1x_channel_dmactrl_dmagetrst()
114static inline u32 host1x_channel_dmactrl_dmainitget(void)
115{
116 return 1 << 2;
117}
118#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
119 host1x_channel_dmactrl_dmainitget()
120
121#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_sync.h b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
new file mode 100644
index 000000000000..4495401525e8
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
@@ -0,0 +1,243 @@
1/*
2 * Copyright (c) 2013 NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef HOST1X_HW_HOST1X02_SYNC_H
52#define HOST1X_HW_HOST1X02_SYNC_H
53
54#define REGISTER_STRIDE 4
55
/*
 * Accessors for the sync register block (syncpoints, thresholds, mlocks
 * and command-FIFO debug/peek registers). Per-id/per-channel registers
 * are arrays with REGISTER_STRIDE (4-byte) spacing. Generated-style
 * code: do not hand-edit the numeric values.
 */
56static inline u32 host1x_sync_syncpt_r(unsigned int id)
57{
58 return 0x400 + id * REGISTER_STRIDE;
59}
60#define HOST1X_SYNC_SYNCPT(id) \
61 host1x_sync_syncpt_r(id)
62static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
63{
64 return 0x40 + id * REGISTER_STRIDE;
65}
66#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
67 host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
68static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
69{
70 return 0x60 + id * REGISTER_STRIDE;
71}
72#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
73 host1x_sync_syncpt_thresh_int_disable_r(id)
74static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
75{
76 return 0x68 + id * REGISTER_STRIDE;
77}
78#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
79 host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
80static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
81{
82 return 0x80 + channel * REGISTER_STRIDE;
83}
84#define HOST1X_SYNC_CF_SETUP(channel) \
85 host1x_sync_cf_setup_r(channel)
86static inline u32 host1x_sync_cf_setup_base_v(u32 r)
87{
88 return (r >> 0) & 0x3ff;
89}
90#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
91 host1x_sync_cf_setup_base_v(r)
92static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
93{
94 return (r >> 16) & 0x3ff;
95}
96#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
97 host1x_sync_cf_setup_limit_v(r)
98static inline u32 host1x_sync_cmdproc_stop_r(void)
99{
100 return 0xac;
101}
102#define HOST1X_SYNC_CMDPROC_STOP \
103 host1x_sync_cmdproc_stop_r()
104static inline u32 host1x_sync_ch_teardown_r(void)
105{
106 return 0xb0;
107}
108#define HOST1X_SYNC_CH_TEARDOWN \
109 host1x_sync_ch_teardown_r()
110static inline u32 host1x_sync_usec_clk_r(void)
111{
112 return 0x1a4;
113}
114#define HOST1X_SYNC_USEC_CLK \
115 host1x_sync_usec_clk_r()
116static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
117{
118 return 0x1a8;
119}
120#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
121 host1x_sync_ctxsw_timeout_cfg_r()
122static inline u32 host1x_sync_ip_busy_timeout_r(void)
123{
124 return 0x1bc;
125}
126#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
127 host1x_sync_ip_busy_timeout_r()
128static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
129{
130 return 0x340 + id * REGISTER_STRIDE;
131}
132#define HOST1X_SYNC_MLOCK_OWNER(id) \
133 host1x_sync_mlock_owner_r(id)
134static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
135{
136 return (v & 0xf) << 8;
137}
138#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
139 host1x_sync_mlock_owner_chid_f(v)
140static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
141{
142 return (r >> 1) & 0x1;
143}
144#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
145 host1x_sync_mlock_owner_cpu_owns_v(r)
146static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
147{
148 return (r >> 0) & 0x1;
149}
150#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
151 host1x_sync_mlock_owner_ch_owns_v(r)
152static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
153{
154 return 0x500 + id * REGISTER_STRIDE;
155}
156#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
157 host1x_sync_syncpt_int_thresh_r(id)
158static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
159{
160 return 0x600 + id * REGISTER_STRIDE;
161}
162#define HOST1X_SYNC_SYNCPT_BASE(id) \
163 host1x_sync_syncpt_base_r(id)
164static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
165{
166 return 0x700 + id * REGISTER_STRIDE;
167}
168#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
169 host1x_sync_syncpt_cpu_incr_r(id)
170static inline u32 host1x_sync_cbread_r(unsigned int channel)
171{
172 return 0x720 + channel * REGISTER_STRIDE;
173}
174#define HOST1X_SYNC_CBREAD(channel) \
175 host1x_sync_cbread_r(channel)
176static inline u32 host1x_sync_cfpeek_ctrl_r(void)
177{
178 return 0x74c;
179}
180#define HOST1X_SYNC_CFPEEK_CTRL \
181 host1x_sync_cfpeek_ctrl_r()
182static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
183{
184 return (v & 0x3ff) << 0;
185}
186#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
187 host1x_sync_cfpeek_ctrl_addr_f(v)
188static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
189{
190 return (v & 0xf) << 16;
191}
192#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
193 host1x_sync_cfpeek_ctrl_channr_f(v)
194static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
195{
196 return (v & 0x1) << 31;
197}
198#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
199 host1x_sync_cfpeek_ctrl_ena_f(v)
200static inline u32 host1x_sync_cfpeek_read_r(void)
201{
202 return 0x750;
203}
204#define HOST1X_SYNC_CFPEEK_READ \
205 host1x_sync_cfpeek_read_r()
206static inline u32 host1x_sync_cfpeek_ptrs_r(void)
207{
208 return 0x754;
209}
210#define HOST1X_SYNC_CFPEEK_PTRS \
211 host1x_sync_cfpeek_ptrs_r()
212static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
213{
214 return (r >> 0) & 0x3ff;
215}
216#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
217 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
218static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
219{
220 return (r >> 16) & 0x3ff;
221}
222#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
223 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
224static inline u32 host1x_sync_cbstat_r(unsigned int channel)
225{
226 return 0x758 + channel * REGISTER_STRIDE;
227}
228#define HOST1X_SYNC_CBSTAT(channel) \
229 host1x_sync_cbstat_r(channel)
230static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
231{
232 return (r >> 0) & 0xffff;
233}
234#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
235 host1x_sync_cbstat_cboffset_v(r)
236static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
237{
238 return (r >> 16) & 0x3ff;
239}
240#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
241 host1x_sync_cbstat_cbclass_v(r)
242
243#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
new file mode 100644
index 000000000000..a3b3c9874413
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
@@ -0,0 +1,175 @@
1/*
2 * Copyright (c) 2013 NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 */
17
18 /*
19 * Function naming determines intended use:
20 *
21 * <x>_r(void) : Returns the offset for register <x>.
22 *
23 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
24 *
25 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
26 *
27 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
28 * and masked to place it at field <y> of register <x>. This value
29 * can be |'d with others to produce a full register value for
30 * register <x>.
31 *
32 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
33 * value can be ~'d and then &'d to clear the value of field <y> for
34 * register <x>.
35 *
36 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
37 * to place it at field <y> of register <x>. This value can be |'d
38 * with others to produce a full register value for <x>.
39 *
40 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
41 * <x> value 'r' after being shifted to place its LSB at bit 0.
42 * This value is suitable for direct comparison with other unshifted
43 * values appropriate for use in field <y> of register <x>.
44 *
45 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
46 * field <y> of register <x>. This value is suitable for direct
47 * comparison with unshifted values appropriate for use in field <y>
48 * of register <x>.
49 */
50
51#ifndef HOST1X_HW_HOST1X02_UCLASS_H
52#define HOST1X_HW_HOST1X02_UCLASS_H
53
54static inline u32 host1x_uclass_incr_syncpt_r(void)
55{
56 return 0x0;
57}
58#define HOST1X_UCLASS_INCR_SYNCPT \
59 host1x_uclass_incr_syncpt_r()
60static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
61{
62 return (v & 0xff) << 8;
63}
64#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
65 host1x_uclass_incr_syncpt_cond_f(v)
66static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
67{
68 return (v & 0xff) << 0;
69}
70#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
71 host1x_uclass_incr_syncpt_indx_f(v)
72static inline u32 host1x_uclass_wait_syncpt_r(void)
73{
74 return 0x8;
75}
76#define HOST1X_UCLASS_WAIT_SYNCPT \
77 host1x_uclass_wait_syncpt_r()
78static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
79{
80 return (v & 0xff) << 24;
81}
82#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
83 host1x_uclass_wait_syncpt_indx_f(v)
84static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
85{
86 return (v & 0xffffff) << 0;
87}
88#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
89 host1x_uclass_wait_syncpt_thresh_f(v)
90static inline u32 host1x_uclass_wait_syncpt_base_r(void)
91{
92 return 0x9;
93}
94#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
95 host1x_uclass_wait_syncpt_base_r()
96static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
97{
98 return (v & 0xff) << 24;
99}
100#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
101 host1x_uclass_wait_syncpt_base_indx_f(v)
102static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
103{
104 return (v & 0xff) << 16;
105}
106#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
107 host1x_uclass_wait_syncpt_base_base_indx_f(v)
108static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
109{
110 return (v & 0xffff) << 0;
111}
112#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
113 host1x_uclass_wait_syncpt_base_offset_f(v)
114static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
115{
116 return (v & 0xff) << 24;
117}
118#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
119 host1x_uclass_load_syncpt_base_base_indx_f(v)
120static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
121{
122 return (v & 0xffffff) << 0;
123}
124#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
125 host1x_uclass_load_syncpt_base_value_f(v)
126static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
127{
128 return (v & 0xff) << 24;
129}
130#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
131 host1x_uclass_incr_syncpt_base_base_indx_f(v)
132static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
133{
134 return (v & 0xffffff) << 0;
135}
136#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
137 host1x_uclass_incr_syncpt_base_offset_f(v)
138static inline u32 host1x_uclass_indoff_r(void)
139{
140 return 0x2d;
141}
142#define HOST1X_UCLASS_INDOFF \
143 host1x_uclass_indoff_r()
144static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
145{
146 return (v & 0xf) << 28;
147}
148#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
149 host1x_uclass_indoff_indbe_f(v)
150static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
151{
152 return (v & 0x1) << 27;
153}
154#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
155 host1x_uclass_indoff_autoinc_f(v)
156static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
157{
158 return (v & 0xff) << 18;
159}
160#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
161 host1x_uclass_indoff_indmodid_f(v)
162static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
163{
164 return (v & 0xffff) << 2;
165}
166#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
167 host1x_uclass_indoff_indroffset_f(v)
168static inline u32 host1x_uclass_indoff_rwn_read_v(void)
169{
170 return 1;
171}
172#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
173 host1x_uclass_indoff_indroffset_f(v)
174
175#endif
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index b592eef1efcb..b26dcc83bc1b 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -22,8 +22,8 @@
22#include <linux/io.h> 22#include <linux/io.h>
23#include <asm/mach/irq.h> 23#include <asm/mach/irq.h>
24 24
25#include "intr.h" 25#include "../intr.h"
26#include "dev.h" 26#include "../dev.h"
27 27
28/* 28/*
29 * Sync point threshold interrupt service function 29 * Sync point threshold interrupt service function
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index 0cf6095d3367..56e85395ac24 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -18,8 +18,8 @@
18 18
19#include <linux/io.h> 19#include <linux/io.h>
20 20
21#include "dev.h" 21#include "../dev.h"
22#include "syncpt.h" 22#include "../syncpt.h"
23 23
24/* 24/*
25 * Write the current syncpoint value back to hw. 25 * Write the current syncpoint value back to hw.
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index c4e1050f2252..de5ec333ce1a 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -18,6 +18,7 @@
18 18
19#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
20#include <linux/err.h> 20#include <linux/err.h>
21#include <linux/host1x.h>
21#include <linux/kref.h> 22#include <linux/kref.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/scatterlist.h> 24#include <linux/scatterlist.h>
@@ -27,7 +28,6 @@
27 28
28#include "channel.h" 29#include "channel.h"
29#include "dev.h" 30#include "dev.h"
30#include "host1x_bo.h"
31#include "job.h" 31#include "job.h"
32#include "syncpt.h" 32#include "syncpt.h"
33 33
@@ -264,7 +264,7 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
264} 264}
265 265
266static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf, 266static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
267 unsigned int offset) 267 unsigned int offset)
268{ 268{
269 offset *= sizeof(u32); 269 offset *= sizeof(u32);
270 270
@@ -281,7 +281,7 @@ struct host1x_firewall {
281 unsigned int num_relocs; 281 unsigned int num_relocs;
282 struct host1x_reloc *reloc; 282 struct host1x_reloc *reloc;
283 283
284 struct host1x_bo *cmdbuf_id; 284 struct host1x_bo *cmdbuf;
285 unsigned int offset; 285 unsigned int offset;
286 286
287 u32 words; 287 u32 words;
@@ -291,25 +291,37 @@ struct host1x_firewall {
291 u32 count; 291 u32 count;
292}; 292};
293 293
294static int check_register(struct host1x_firewall *fw, unsigned long offset)
295{
296 if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
297 if (!fw->num_relocs)
298 return -EINVAL;
299
300 if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
301 return -EINVAL;
302
303 fw->num_relocs--;
304 fw->reloc++;
305 }
306
307 return 0;
308}
309
294static int check_mask(struct host1x_firewall *fw) 310static int check_mask(struct host1x_firewall *fw)
295{ 311{
296 u32 mask = fw->mask; 312 u32 mask = fw->mask;
297 u32 reg = fw->reg; 313 u32 reg = fw->reg;
314 int ret;
298 315
299 while (mask) { 316 while (mask) {
300 if (fw->words == 0) 317 if (fw->words == 0)
301 return -EINVAL; 318 return -EINVAL;
302 319
303 if (mask & 1) { 320 if (mask & 1) {
304 if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) { 321 ret = check_register(fw, reg);
305 if (!fw->num_relocs) 322 if (ret < 0)
306 return -EINVAL; 323 return ret;
307 if (!check_reloc(fw->reloc, fw->cmdbuf_id, 324
308 fw->offset))
309 return -EINVAL;
310 fw->reloc++;
311 fw->num_relocs--;
312 }
313 fw->words--; 325 fw->words--;
314 fw->offset++; 326 fw->offset++;
315 } 327 }
@@ -324,19 +336,16 @@ static int check_incr(struct host1x_firewall *fw)
324{ 336{
325 u32 count = fw->count; 337 u32 count = fw->count;
326 u32 reg = fw->reg; 338 u32 reg = fw->reg;
339 int ret;
327 340
328 while (count) { 341 while (count) {
329 if (fw->words == 0) 342 if (fw->words == 0)
330 return -EINVAL; 343 return -EINVAL;
331 344
332 if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) { 345 ret = check_register(fw, reg);
333 if (!fw->num_relocs) 346 if (ret < 0)
334 return -EINVAL; 347 return ret;
335 if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset)) 348
336 return -EINVAL;
337 fw->reloc++;
338 fw->num_relocs--;
339 }
340 reg++; 349 reg++;
341 fw->words--; 350 fw->words--;
342 fw->offset++; 351 fw->offset++;
@@ -348,21 +357,17 @@ static int check_incr(struct host1x_firewall *fw)
348 357
349static int check_nonincr(struct host1x_firewall *fw) 358static int check_nonincr(struct host1x_firewall *fw)
350{ 359{
351 int is_addr_reg = fw->job->is_addr_reg(fw->dev, fw->class, fw->reg);
352 u32 count = fw->count; 360 u32 count = fw->count;
361 int ret;
353 362
354 while (count) { 363 while (count) {
355 if (fw->words == 0) 364 if (fw->words == 0)
356 return -EINVAL; 365 return -EINVAL;
357 366
358 if (is_addr_reg) { 367 ret = check_register(fw, fw->reg);
359 if (!fw->num_relocs) 368 if (ret < 0)
360 return -EINVAL; 369 return ret;
361 if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset)) 370
362 return -EINVAL;
363 fw->reloc++;
364 fw->num_relocs--;
365 }
366 fw->words--; 371 fw->words--;
367 fw->offset++; 372 fw->offset++;
368 count--; 373 count--;
@@ -381,7 +386,7 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
381 return 0; 386 return 0;
382 387
383 fw->words = g->words; 388 fw->words = g->words;
384 fw->cmdbuf_id = g->bo; 389 fw->cmdbuf = g->bo;
385 fw->offset = 0; 390 fw->offset = 0;
386 391
387 while (fw->words && !err) { 392 while (fw->words && !err) {
@@ -436,10 +441,6 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
436 } 441 }
437 } 442 }
438 443
439 /* No relocs should remain at this point */
440 if (fw->num_relocs)
441 err = -EINVAL;
442
443out: 444out:
444 return err; 445 return err;
445} 446}
@@ -493,6 +494,10 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
493 offset += g->words * sizeof(u32); 494 offset += g->words * sizeof(u32);
494 } 495 }
495 496
497 /* No relocs should remain at this point */
498 if (fw.num_relocs)
499 return -EINVAL;
500
496 return 0; 501 return 0;
497} 502}
498 503
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index fba45f20458e..33a697d6dcef 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -34,15 +34,6 @@ struct host1x_cmdbuf {
34 u32 pad; 34 u32 pad;
35}; 35};
36 36
37struct host1x_reloc {
38 struct host1x_bo *cmdbuf;
39 u32 cmdbuf_offset;
40 struct host1x_bo *target;
41 u32 target_offset;
42 u32 shift;
43 u32 pad;
44};
45
46struct host1x_waitchk { 37struct host1x_waitchk {
47 struct host1x_bo *bo; 38 struct host1x_bo *bo;
48 u32 offset; 39 u32 offset;
@@ -56,105 +47,6 @@ struct host1x_job_unpin_data {
56}; 47};
57 48
58/* 49/*
59 * Each submit is tracked as a host1x_job.
60 */
61struct host1x_job {
62 /* When refcount goes to zero, job can be freed */
63 struct kref ref;
64
65 /* List entry */
66 struct list_head list;
67
68 /* Channel where job is submitted to */
69 struct host1x_channel *channel;
70
71 u32 client;
72
73 /* Gathers and their memory */
74 struct host1x_job_gather *gathers;
75 unsigned int num_gathers;
76
77 /* Wait checks to be processed at submit time */
78 struct host1x_waitchk *waitchk;
79 unsigned int num_waitchk;
80 u32 waitchk_mask;
81
82 /* Array of handles to be pinned & unpinned */
83 struct host1x_reloc *relocarray;
84 unsigned int num_relocs;
85 struct host1x_job_unpin_data *unpins;
86 unsigned int num_unpins;
87
88 dma_addr_t *addr_phys;
89 dma_addr_t *gather_addr_phys;
90 dma_addr_t *reloc_addr_phys;
91
92 /* Sync point id, number of increments and end related to the submit */
93 u32 syncpt_id;
94 u32 syncpt_incrs;
95 u32 syncpt_end;
96
97 /* Maximum time to wait for this job */
98 unsigned int timeout;
99
100 /* Index and number of slots used in the push buffer */
101 unsigned int first_get;
102 unsigned int num_slots;
103
104 /* Copy of gathers */
105 size_t gather_copy_size;
106 dma_addr_t gather_copy;
107 u8 *gather_copy_mapped;
108
109 /* Check if register is marked as an address reg */
110 int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
111
112 /* Request a SETCLASS to this class */
113 u32 class;
114
115 /* Add a channel wait for previous ops to complete */
116 bool serialize;
117};
118/*
119 * Allocate memory for a job. Just enough memory will be allocated to
120 * accomodate the submit.
121 */
122struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
123 u32 num_cmdbufs, u32 num_relocs,
124 u32 num_waitchks);
125
126/*
127 * Add a gather to a job.
128 */
129void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
130 u32 words, u32 offset);
131
132/*
133 * Increment reference going to host1x_job.
134 */
135struct host1x_job *host1x_job_get(struct host1x_job *job);
136
137/*
138 * Decrement reference job, free if goes to zero.
139 */
140void host1x_job_put(struct host1x_job *job);
141
142/*
143 * Pin memory related to job. This handles relocation of addresses to the
144 * host1x address space. Handles both the gather memory and any other memory
145 * referred to from the gather buffers.
146 *
147 * Handles also patching out host waits that would wait for an expired sync
148 * point value.
149 */
150int host1x_job_pin(struct host1x_job *job, struct device *dev);
151
152/*
153 * Unpin memory related to job.
154 */
155void host1x_job_unpin(struct host1x_job *job);
156
157/*
158 * Dump contents of job to debug output. 50 * Dump contents of job to debug output.
159 */ 51 */
160void host1x_job_dump(struct device *dev, struct host1x_job *job); 52void host1x_job_dump(struct device *dev, struct host1x_job *job);
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 409745b949db..159c479829c9 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -30,9 +30,32 @@
30#define SYNCPT_CHECK_PERIOD (2 * HZ) 30#define SYNCPT_CHECK_PERIOD (2 * HZ)
31#define MAX_STUCK_CHECK_COUNT 15 31#define MAX_STUCK_CHECK_COUNT 15
32 32
33static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host, 33static struct host1x_syncpt_base *
34 struct device *dev, 34host1x_syncpt_base_request(struct host1x *host)
35 bool client_managed) 35{
36 struct host1x_syncpt_base *bases = host->bases;
37 unsigned int i;
38
39 for (i = 0; i < host->info->nb_bases; i++)
40 if (!bases[i].requested)
41 break;
42
43 if (i >= host->info->nb_bases)
44 return NULL;
45
46 bases[i].requested = true;
47 return &bases[i];
48}
49
50static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
51{
52 if (base)
53 base->requested = false;
54}
55
56static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
57 struct device *dev,
58 unsigned long flags)
36{ 59{
37 int i; 60 int i;
38 struct host1x_syncpt *sp = host->syncpt; 61 struct host1x_syncpt *sp = host->syncpt;
@@ -44,6 +67,12 @@ static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
44 if (i >= host->info->nb_pts) 67 if (i >= host->info->nb_pts)
45 return NULL; 68 return NULL;
46 69
70 if (flags & HOST1X_SYNCPT_HAS_BASE) {
71 sp->base = host1x_syncpt_base_request(host);
72 if (!sp->base)
73 return NULL;
74 }
75
47 name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id, 76 name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
48 dev ? dev_name(dev) : NULL); 77 dev ? dev_name(dev) : NULL);
49 if (!name) 78 if (!name)
@@ -51,7 +80,11 @@ static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
51 80
52 sp->dev = dev; 81 sp->dev = dev;
53 sp->name = name; 82 sp->name = name;
54 sp->client_managed = client_managed; 83
84 if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
85 sp->client_managed = true;
86 else
87 sp->client_managed = false;
55 88
56 return sp; 89 return sp;
57} 90}
@@ -303,25 +336,35 @@ int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
303 336
304int host1x_syncpt_init(struct host1x *host) 337int host1x_syncpt_init(struct host1x *host)
305{ 338{
339 struct host1x_syncpt_base *bases;
306 struct host1x_syncpt *syncpt; 340 struct host1x_syncpt *syncpt;
307 int i; 341 int i;
308 342
309 syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts, 343 syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
310 GFP_KERNEL); 344 GFP_KERNEL);
311 if (!syncpt) 345 if (!syncpt)
312 return -ENOMEM; 346 return -ENOMEM;
313 347
314 for (i = 0; i < host->info->nb_pts; ++i) { 348 bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases,
349 GFP_KERNEL);
350 if (!bases)
351 return -ENOMEM;
352
353 for (i = 0; i < host->info->nb_pts; i++) {
315 syncpt[i].id = i; 354 syncpt[i].id = i;
316 syncpt[i].host = host; 355 syncpt[i].host = host;
317 } 356 }
318 357
358 for (i = 0; i < host->info->nb_bases; i++)
359 bases[i].id = i;
360
319 host->syncpt = syncpt; 361 host->syncpt = syncpt;
362 host->bases = bases;
320 363
321 host1x_syncpt_restore(host); 364 host1x_syncpt_restore(host);
322 365
323 /* Allocate sync point to use for clearing waits for expired fences */ 366 /* Allocate sync point to use for clearing waits for expired fences */
324 host->nop_sp = _host1x_syncpt_alloc(host, NULL, false); 367 host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
325 if (!host->nop_sp) 368 if (!host->nop_sp)
326 return -ENOMEM; 369 return -ENOMEM;
327 370
@@ -329,10 +372,10 @@ int host1x_syncpt_init(struct host1x *host)
329} 372}
330 373
331struct host1x_syncpt *host1x_syncpt_request(struct device *dev, 374struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
332 bool client_managed) 375 unsigned long flags)
333{ 376{
334 struct host1x *host = dev_get_drvdata(dev->parent); 377 struct host1x *host = dev_get_drvdata(dev->parent);
335 return _host1x_syncpt_alloc(host, dev, client_managed); 378 return host1x_syncpt_alloc(host, dev, flags);
336} 379}
337 380
338void host1x_syncpt_free(struct host1x_syncpt *sp) 381void host1x_syncpt_free(struct host1x_syncpt *sp)
@@ -340,7 +383,9 @@ void host1x_syncpt_free(struct host1x_syncpt *sp)
340 if (!sp) 383 if (!sp)
341 return; 384 return;
342 385
386 host1x_syncpt_base_free(sp->base);
343 kfree(sp->name); 387 kfree(sp->name);
388 sp->base = NULL;
344 sp->dev = NULL; 389 sp->dev = NULL;
345 sp->name = NULL; 390 sp->name = NULL;
346 sp->client_managed = false; 391 sp->client_managed = false;
@@ -354,6 +399,25 @@ void host1x_syncpt_deinit(struct host1x *host)
354 kfree(sp->name); 399 kfree(sp->name);
355} 400}
356 401
402/*
403 * Read max. It indicates how many operations there are in queue, either in
404 * channel or in a software thread.
405 * */
406u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
407{
408 smp_rmb();
409 return (u32)atomic_read(&sp->max_val);
410}
411
412/*
413 * Read min, which is a shadow of the current sync point value in hardware.
414 */
415u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
416{
417 smp_rmb();
418 return (u32)atomic_read(&sp->min_val);
419}
420
357int host1x_syncpt_nb_pts(struct host1x *host) 421int host1x_syncpt_nb_pts(struct host1x *host)
358{ 422{
359 return host->info->nb_pts; 423 return host->info->nb_pts;
@@ -375,3 +439,13 @@ struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
375 return NULL; 439 return NULL;
376 return host->syncpt + id; 440 return host->syncpt + id;
377} 441}
442
443struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
444{
445 return sp ? sp->base : NULL;
446}
447
448u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
449{
450 return base->id;
451}
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index 267c0b9d3647..9056465ecd3f 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -20,6 +20,7 @@
20#define __HOST1X_SYNCPT_H 20#define __HOST1X_SYNCPT_H
21 21
22#include <linux/atomic.h> 22#include <linux/atomic.h>
23#include <linux/host1x.h>
23#include <linux/kernel.h> 24#include <linux/kernel.h>
24#include <linux/sched.h> 25#include <linux/sched.h>
25 26
@@ -30,6 +31,11 @@ struct host1x;
30/* Reserved for replacing an expired wait with a NOP */ 31/* Reserved for replacing an expired wait with a NOP */
31#define HOST1X_SYNCPT_RESERVED 0 32#define HOST1X_SYNCPT_RESERVED 0
32 33
34struct host1x_syncpt_base {
35 unsigned int id;
36 bool requested;
37};
38
33struct host1x_syncpt { 39struct host1x_syncpt {
34 int id; 40 int id;
35 atomic_t min_val; 41 atomic_t min_val;
@@ -39,6 +45,7 @@ struct host1x_syncpt {
39 bool client_managed; 45 bool client_managed;
40 struct host1x *host; 46 struct host1x *host;
41 struct device *dev; 47 struct device *dev;
48 struct host1x_syncpt_base *base;
42 49
43 /* interrupt data */ 50 /* interrupt data */
44 struct host1x_syncpt_intr intr; 51 struct host1x_syncpt_intr intr;
@@ -50,25 +57,6 @@ int host1x_syncpt_init(struct host1x *host);
50/* Free sync point array */ 57/* Free sync point array */
51void host1x_syncpt_deinit(struct host1x *host); 58void host1x_syncpt_deinit(struct host1x *host);
52 59
53/*
54 * Read max. It indicates how many operations there are in queue, either in
55 * channel or in a software thread.
56 * */
57static inline u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
58{
59 smp_rmb();
60 return (u32)atomic_read(&sp->max_val);
61}
62
63/*
64 * Read min, which is a shadow of the current sync point value in hardware.
65 */
66static inline u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
67{
68 smp_rmb();
69 return (u32)atomic_read(&sp->min_val);
70}
71
72/* Return number of sync point supported. */ 60/* Return number of sync point supported. */
73int host1x_syncpt_nb_pts(struct host1x *host); 61int host1x_syncpt_nb_pts(struct host1x *host);
74 62
@@ -112,9 +100,6 @@ static inline bool host1x_syncpt_idle(struct host1x_syncpt *sp)
112 return (min == max); 100 return (min == max);
113} 101}
114 102
115/* Return pointer to struct denoting sync point id. */
116struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
117
118/* Load current value from hardware to the shadow register. */ 103/* Load current value from hardware to the shadow register. */
119u32 host1x_syncpt_load(struct host1x_syncpt *sp); 104u32 host1x_syncpt_load(struct host1x_syncpt *sp);
120 105
@@ -130,16 +115,9 @@ void host1x_syncpt_restore(struct host1x *host);
130/* Read current wait base value into shadow register and return it. */ 115/* Read current wait base value into shadow register and return it. */
131u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp); 116u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
132 117
133/* Request incrementing a sync point. */
134int host1x_syncpt_incr(struct host1x_syncpt *sp);
135
136/* Indicate future operations by incrementing the sync point max. */ 118/* Indicate future operations by incrementing the sync point max. */
137u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs); 119u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
138 120
139/* Wait until sync point reaches a threshold value, or a timeout. */
140int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh,
141 long timeout, u32 *value);
142
143/* Check if sync point id is valid. */ 121/* Check if sync point id is valid. */
144static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp) 122static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
145{ 123{
@@ -149,14 +127,4 @@ static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
149/* Patch a wait by replacing it with a wait for syncpt 0 value 0 */ 127/* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
150int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr); 128int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
151 129
152/* Return id of the sync point */
153u32 host1x_syncpt_id(struct host1x_syncpt *sp);
154
155/* Allocate a sync point for a device. */
156struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
157 bool client_managed);
158
159/* Free a sync point. */
160void host1x_syncpt_free(struct host1x_syncpt *sp);
161
162#endif 130#endif